Mirror of https://github.com/ansible/ansible.git

Migrated to azure.azcollection
parent fe0f4750e1
commit ab914b9ab6
@@ -1,211 +0,0 @@
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from ansible.module_utils.azure_rm_common import AzureRMModuleBase
import re
from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel
from ansible.module_utils.six import string_types


class AzureRMModuleBaseExt(AzureRMModuleBase):

    def inflate_parameters(self, spec, body, level):
        if isinstance(body, list):
            for item in body:
                self.inflate_parameters(spec, item, level)
            return
        for name in spec.keys():
            # first check if option was passed
            param = body.get(name)
            if param is None:
                if spec[name].get('purgeIfNone', False):
                    body.pop(name, None)
                continue
            # check if pattern needs to be used
            pattern = spec[name].get('pattern', None)
            if pattern:
                if pattern == 'camelize':
                    param = _snake_to_camel(param, True)
                elif isinstance(pattern, list):
                    normalized = None
                    for p in pattern:
                        normalized = self.normalize_resource_id(param, p)
                        body[name] = normalized
                        if normalized is not None:
                            break
                else:
                    param = self.normalize_resource_id(param, pattern)
                    body[name] = param
            disposition = spec[name].get('disposition', '*')
            if level == 0 and not disposition.startswith('/'):
                continue
            if disposition == '/':
                disposition = '/*'
            parts = disposition.split('/')
            if parts[0] == '':
                # should fail if level is > 0?
                parts.pop(0)
                target_dict = body
            elem = body.pop(name)
            while len(parts) > 1:
                target_dict = target_dict.setdefault(parts.pop(0), {})
            targetName = parts[0] if parts[0] != '*' else name
            target_dict[targetName] = elem
            if spec[name].get('options'):
                self.inflate_parameters(spec[name].get('options'), target_dict[targetName], level + 1)

    def normalize_resource_id(self, value, pattern):
        '''
        Return a proper resource id string.

        :param value: It could be a resource name, resource id or dict containing parts from the pattern.
        :param pattern: pattern of the resource id, just like in Azure Swagger
        '''
        value_dict = {}
        if isinstance(value, string_types):
            value_parts = value.split('/')
            if len(value_parts) == 1:
                value_dict['name'] = value
            else:
                pattern_parts = pattern.split('/')
                if len(value_parts) != len(pattern_parts):
                    return None
                for i in range(len(value_parts)):
                    if pattern_parts[i].startswith('{'):
                        value_dict[pattern_parts[i][1:-1]] = value_parts[i]
                    elif value_parts[i].lower() != pattern_parts[i].lower():
                        return None
        elif isinstance(value, dict):
            value_dict = value
        else:
            return None
        if not value_dict.get('subscription_id'):
            value_dict['subscription_id'] = self.subscription_id
        if not value_dict.get('resource_group'):
            value_dict['resource_group'] = self.resource_group

        # check if any extra values passed
        for k in value_dict:
            if not ('{' + k + '}') in pattern:
                return None
        # format url
        return pattern.format(**value_dict)

    def idempotency_check(self, old_params, new_params):
        '''
        Return True if something changed. Function will use fields from module_arg_spec to perform dependency checks.
        :param old_params: old parameters dictionary, body from Get request.
        :param new_params: new parameters dictionary, unpacked module parameters.
        '''
        modifiers = {}
        result = {}
        self.create_compare_modifiers(self.module.argument_spec, '', modifiers)
        self.results['modifiers'] = modifiers
        return self.default_compare(modifiers, new_params, old_params, '', self.results)

    def create_compare_modifiers(self, arg_spec, path, result):
        for k in arg_spec.keys():
            o = arg_spec[k]
            updatable = o.get('updatable', True)
            comparison = o.get('comparison', 'default')
            disposition = o.get('disposition', '*')
            if disposition == '/':
                disposition = '/*'
            p = (path +
                 ('/' if len(path) > 0 else '') +
                 disposition.replace('*', k) +
                 ('/*' if o['type'] == 'list' else ''))
            if comparison != 'default' or not updatable:
                result[p] = {'updatable': updatable, 'comparison': comparison}
            if o.get('options'):
                self.create_compare_modifiers(o.get('options'), p, result)

    def default_compare(self, modifiers, new, old, path, result):
        '''
        Default dictionary comparison.
        This function will work well with most of the Azure resources.
        It correctly handles "location" comparison.

        Value handling:
            - if "new" value is None, it will be taken from "old" dictionary if "incremental_update"
              is enabled.
        List handling:
            - if list contains "name" field it will be sorted by "name" before comparison is done.
            - if module has "incremental_update" set, items missing in the new list will be copied
              from the old list

        Warnings:
            If field is marked as non-updatable, appropriate warning will be printed out and
            "new" structure will be updated to old value.

        :modifiers: Optional dictionary of modifiers, where key is the path and value is dict of modifiers
        :param new: New version
        :param old: Old version

        Returns True if no difference between structures has been detected.
        Returns False if difference was detected.
        '''
        if new is None:
            return True
        elif isinstance(new, dict):
            comparison_result = True
            if not isinstance(old, dict):
                result['compare'].append('changed [' + path + '] old dict is null')
                comparison_result = False
            else:
                for k in set(new.keys()) | set(old.keys()):
                    new_item = new.get(k, None)
                    old_item = old.get(k, None)
                    if new_item is None:
                        if isinstance(old_item, dict):
                            new[k] = old_item
                            result['compare'].append('new item was empty, using old [' + path + '][ ' + k + ' ]')
                    elif not self.default_compare(modifiers, new_item, old_item, path + '/' + k, result):
                        comparison_result = False
            return comparison_result
        elif isinstance(new, list):
            comparison_result = True
            if not isinstance(old, list) or len(new) != len(old):
                result['compare'].append('changed [' + path + '] length is different or old value is null')
                comparison_result = False
            else:
                if isinstance(old[0], dict):
                    key = None
                    if 'id' in old[0] and 'id' in new[0]:
                        key = 'id'
                    elif 'name' in old[0] and 'name' in new[0]:
                        key = 'name'
                    else:
                        key = next(iter(old[0]))
                    new = sorted(new, key=lambda x: x.get(key, None))
                    old = sorted(old, key=lambda x: x.get(key, None))
                else:
                    new = sorted(new)
                    old = sorted(old)
                for i in range(len(new)):
                    if not self.default_compare(modifiers, new[i], old[i], path + '/*', result):
                        comparison_result = False
            return comparison_result
        else:
            updatable = modifiers.get(path, {}).get('updatable', True)
            comparison = modifiers.get(path, {}).get('comparison', 'default')
            if comparison == 'ignore':
                return True
            elif comparison == 'default' or comparison == 'sensitive':
                if isinstance(old, string_types) and isinstance(new, string_types):
                    new = new.lower()
                    old = old.lower()
            elif comparison == 'location':
                if isinstance(old, string_types) and isinstance(new, string_types):
                    new = new.replace(' ', '').lower()
                    old = old.replace(' ', '').lower()
            if str(new) != str(old):
                result['compare'].append('changed [' + path + '] ' + str(new) + ' != ' + str(old) + ' - ' + str(comparison))
                if updatable:
                    return False
                else:
                    self.module.warn("property '" + path + "' cannot be updated (" + str(old) + "->" + str(new) + ")")
                    return True
            else:
                return True
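
The helpers above are what generated Azure resource modules build on. The following is an illustrative sketch only, not code from this commit: it shows how a hypothetical module could use `disposition` entries to inflate flat module options into the nested REST body and then detect drift with create_compare_modifiers/default_compare. The import path, the AzureRMWidget class and the get_widget() stub are assumptions made for the example.

# Illustrative sketch (not part of this commit); assumes an Ansible 2.9-era
# controller where these utilities live in ansible.module_utils.azure_rm_common_ext.
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt


class AzureRMWidget(AzureRMModuleBaseExt):          # hypothetical resource module
    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            # disposition='/' keeps the option at the top level of the REST body;
            # comparison='location' makes "East US" equal to "eastus" when comparing.
            location=dict(type='str', disposition='/', updatable=False, comparison='location'),
            # '/properties/*' routes this flat option into body['properties']['sku_name']
            sku_name=dict(type='str', disposition='/properties/*', updatable=False)
        )
        self.resource_group = None
        self.name = None
        self.body = {}
        # default_compare() records every detected difference under results['compare']
        self.results = dict(changed=False, compare=[])
        super(AzureRMWidget, self).__init__(derived_arg_spec=self.module_arg_spec,
                                            supports_check_mode=True)

    def exec_module(self, **kwargs):
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        # Turn the flat option dict into the nested PUT body the REST API expects.
        self.inflate_parameters(self.module_arg_spec, self.body, 0)

        # Compare the desired body against the current state, honoring the
        # 'updatable' and 'comparison' flags declared in the arg spec.
        old = self.get_widget()
        modifiers = {}
        self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
        if old and not self.default_compare(modifiers, self.body, old, '', self.results):
            self.results['changed'] = True
        return self.results

    def get_widget(self):
        # Placeholder: a real module would GET the resource via its SDK client and
        # return the body as a dict, or None when the resource does not exist yet.
        return None
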
@@ -1,97 +0,0 @@
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION

try:
    from msrestazure.azure_exceptions import CloudError
    from msrestazure.azure_configuration import AzureConfiguration
    from msrest.service_client import ServiceClient
    from msrest.pipeline import ClientRawResponse
    from msrest.polling import LROPoller
    from msrestazure.polling.arm_polling import ARMPolling
    import uuid
    import json
except ImportError:
    # This is handled in azure_rm_common
    AzureConfiguration = object

ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)


class GenericRestClientConfiguration(AzureConfiguration):

    def __init__(self, credentials, subscription_id, base_url=None):

        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        if not base_url:
            base_url = 'https://management.azure.com'

        super(GenericRestClientConfiguration, self).__init__(base_url)

        self.add_user_agent(ANSIBLE_USER_AGENT)

        self.credentials = credentials
        self.subscription_id = subscription_id


class GenericRestClient(object):

    def __init__(self, credentials, subscription_id, base_url=None):
        self.config = GenericRestClientConfiguration(credentials, subscription_id, base_url)
        self._client = ServiceClient(self.config.credentials, self.config)
        self.models = None

    def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval):
        # Construct and send request
        operation_config = {}

        request = None

        if header_parameters is None:
            header_parameters = {}

        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())

        if method == 'GET':
            request = self._client.get(url, query_parameters)
        elif method == 'PUT':
            request = self._client.put(url, query_parameters)
        elif method == 'POST':
            request = self._client.post(url, query_parameters)
        elif method == 'HEAD':
            request = self._client.head(url, query_parameters)
        elif method == 'PATCH':
            request = self._client.patch(url, query_parameters)
        elif method == 'DELETE':
            request = self._client.delete(url, query_parameters)
        elif method == 'MERGE':
            request = self._client.merge(url, query_parameters)

        response = self._client.send(request, header_parameters, body, **operation_config)

        if response.status_code not in expected_status_codes:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        elif response.status_code == 202 and polling_timeout > 0:
            def get_long_running_output(response):
                return response
            poller = LROPoller(self._client,
                               ClientRawResponse(None, response),
                               get_long_running_output,
                               ARMPolling(polling_interval, **operation_config))
            response = self.get_poller_result(poller, polling_timeout)

        return response

    def get_poller_result(self, poller, timeout):
        try:
            poller.wait(timeout=timeout)
            return poller.result()
        except Exception as exc:
            raise
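
A minimal usage sketch, not part of this commit: how a module derived from AzureRMModuleBase could call an arbitrary ARM endpoint through GenericRestClient, in the style of azure_rm_resource. The import path is assumed to be ansible.module_utils.azure_rm_common_rest, and the URL, API version and resource names are example values.

import json

from ansible.module_utils.azure_rm_common_rest import GenericRestClient


def fetch_storage_account(module):
    # 'module' is assumed to be an AzureRMModuleBase subclass instance, so
    # get_mgmt_svc_client() and the Azure credentials are already available.
    client = module.get_mgmt_svc_client(GenericRestClient,
                                        base_url=module._cloud_environment.endpoints.resource_manager)
    url = ('/subscriptions/{0}/resourceGroups/{1}/providers/'
           'Microsoft.Storage/storageAccounts/{2}').format(module.subscription_id,
                                                           'myResourceGroup',
                                                           'mystorageaccount')
    # query() sends the raw request; for 202 responses it waits on the ARM
    # long-running-operation poller for up to polling_timeout seconds.
    response = client.query(url,
                            'GET',
                            {'api-version': '2019-06-01'},
                            {'Content-Type': 'application/json; charset=utf-8'},
                            None,         # no request body for GET
                            [200, 404],   # status codes that should not raise CloudError
                            600,          # polling_timeout
                            30)           # polling_interval
    return json.loads(response.text)
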
@@ -1,745 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, Julien Stroheker <juliens@microsoft.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_acs
|
||||
version_added: "2.4"
|
||||
short_description: Manage an Azure Container Service(ACS) instance
|
||||
description:
|
||||
- Create, update and delete an Azure Container Service(ACS) instance.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the Container Services exists or will be created.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the Azure Container Services(ACS) instance.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the ACS. Use C(present) to create or update an ACS and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
location:
|
||||
description:
|
||||
- Valid azure location. Defaults to location of the resource group.
|
||||
orchestration_platform:
|
||||
description:
|
||||
- Specifies the Container Orchestration Platform to use. Currently can be either C(DCOS), C(Kubernetes) or C(Swarm).
|
||||
- The I(service_principal) must be defined if set to C(Kubernetes).
|
||||
choices:
|
||||
- 'DCOS'
|
||||
- 'Kubernetes'
|
||||
- 'Swarm'
|
||||
required: true
|
||||
master_profile:
|
||||
description:
|
||||
- Master profile suboptions.
|
||||
required: true
|
||||
suboptions:
|
||||
count:
|
||||
description:
|
||||
- Number of masters (VMs) in the container service cluster. Allowed values are C(1), C(3), and C(5).
|
||||
required: true
|
||||
choices:
|
||||
- 1
|
||||
- 3
|
||||
- 5
|
||||
vm_size:
|
||||
description:
|
||||
- The VM Size of each of the Agent Pool VM's (e.g. C(Standard_F1) / C(Standard_D2v2)).
|
||||
required: true
|
||||
version_added: 2.5
|
||||
dns_prefix:
|
||||
description:
|
||||
- The DNS Prefix to use for the Container Service master nodes.
|
||||
required: true
|
||||
linux_profile:
|
||||
description:
|
||||
- The Linux profile suboptions.
|
||||
required: true
|
||||
suboptions:
|
||||
admin_username:
|
||||
description:
|
||||
- The Admin Username for the Cluster.
|
||||
required: true
|
||||
ssh_key:
|
||||
description:
|
||||
- The Public SSH Key used to access the cluster.
|
||||
required: true
|
||||
agent_pool_profiles:
|
||||
description:
|
||||
- The agent pool profile suboptions.
|
||||
required: true
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Unique name of the agent pool profile in the context of the subscription and resource group.
|
||||
required: true
|
||||
count:
|
||||
description:
|
||||
- Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive).
|
||||
required: true
|
||||
dns_prefix:
|
||||
description:
|
||||
- The DNS Prefix given to Agents in this Agent Pool.
|
||||
required: true
|
||||
vm_size:
|
||||
description:
|
||||
- The VM Size of each of the Agent Pool VM's (e.g. C(Standard_F1) / C(Standard_D2v2)).
|
||||
required: true
|
||||
service_principal:
|
||||
description:
|
||||
- The service principal suboptions.
|
||||
- Required when I(orchestration_platform=Kubernetes).
|
||||
suboptions:
|
||||
client_id:
|
||||
description:
|
||||
- The ID for the Service Principal.
|
||||
client_secret:
|
||||
description:
|
||||
- The secret password associated with the service principal.
|
||||
diagnostics_profile:
|
||||
description:
|
||||
- Should VM Diagnostics be enabled for the Container Service VM's.
|
||||
required: true
|
||||
type: bool
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Julien Stroheker (@julienstroheker)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create an azure container services instance running Kubernetes
|
||||
azure_rm_acs:
|
||||
name: acctestcontservice1
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
orchestration_platform: Kubernetes
|
||||
master_profile:
|
||||
- count: 3
|
||||
dns_prefix: acsk8smasterdns
|
||||
vm_size: Standard_D2_v2
|
||||
linux_profile:
|
||||
- admin_username: azureuser
|
||||
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
|
||||
service_principal:
|
||||
- client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948"
|
||||
client_secret: "mySPNp@ssw0rd!"
|
||||
agent_pool_profiles:
|
||||
- name: default
|
||||
count: 5
|
||||
dns_prefix: acsk8sagent
|
||||
vm_size: Standard_D2_v2
|
||||
diagnostics_profile: false
|
||||
tags:
|
||||
Environment: Production
|
||||
|
||||
- name: Create an azure container services instance running DCOS
|
||||
azure_rm_acs:
|
||||
name: acctestcontservice2
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
orchestration_platform: DCOS
|
||||
master_profile:
|
||||
- count: 3
|
||||
dns_prefix: acsdcosmasterdns
|
||||
vm_size: Standard_D2_v2
|
||||
linux_profile:
|
||||
- admin_username: azureuser
|
||||
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
|
||||
agent_pool_profiles:
|
||||
- name: default
|
||||
count: 5
|
||||
dns_prefix: acscdcosagent
|
||||
vm_size: Standard_D2_v2
|
||||
diagnostics_profile: false
|
||||
tags:
|
||||
Environment: Production
|
||||
|
||||
- name: Create an azure container services instance running Swarm
|
||||
azure_rm_acs:
|
||||
name: acctestcontservice3
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
orchestration_platform: Swarm
|
||||
master_profile:
|
||||
- count: 3
|
||||
dns_prefix: acsswarmmasterdns
|
||||
vm_size: Standard_D2_v2
|
||||
linux_profile:
|
||||
- admin_username: azureuser
|
||||
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
|
||||
agent_pool_profiles:
|
||||
- name: default
|
||||
count: 5
|
||||
dns_prefix: acsswarmagent
|
||||
vm_size: Standard_D2_v2
|
||||
diagnostics_profile: false
|
||||
tags:
|
||||
Environment: Production
|
||||
|
||||
# Deletes the specified container service in the specified subscription and resource group.
|
||||
# The operation does not delete other resources created as part of creating a container service,
|
||||
# including storage accounts, VMs, and availability sets. All the other resources created with the container
|
||||
# service are part of the same resource group and can be deleted individually.
|
||||
- name: Remove an azure container services instance
|
||||
azure_rm_acs:
|
||||
name: acctestcontservice3
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
state: absent
|
||||
orchestration_platform: Swarm
|
||||
master_profile:
|
||||
- count: 1
|
||||
vm_size: Standard_A0
|
||||
dns_prefix: acstestingmasterdns5
|
||||
linux_profile:
|
||||
- admin_username: azureuser
|
||||
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
|
||||
agent_pool_profiles:
|
||||
- name: default
|
||||
count: 4
|
||||
dns_prefix: acctestagent15
|
||||
vm_size: Standard_A0
|
||||
diagnostics_profile: false
|
||||
tags:
|
||||
Ansible: azure_rm_acs
|
||||
'''
|
||||
RETURN = '''
|
||||
state:
|
||||
description: Current state of the Azure Container Service(ACS).
|
||||
returned: always
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.containerservice.models import (
|
||||
ContainerService, ContainerServiceOrchestratorProfile, ContainerServiceCustomProfile,
|
||||
ContainerServiceServicePrincipalProfile, ContainerServiceMasterProfile,
|
||||
ContainerServiceAgentPoolProfile, ContainerServiceWindowsProfile,
|
||||
ContainerServiceLinuxProfile, ContainerServiceSshConfiguration,
|
||||
ContainerServiceDiagnosticsProfile, ContainerServiceSshPublicKey,
|
||||
ContainerServiceVMDiagnostics
|
||||
)
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def create_agent_pool_profile_instance(agentpoolprofile):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceAgentPoolProfile
|
||||
:param: agentpoolprofile: dict with the parameters to setup the ContainerServiceAgentPoolProfile
|
||||
:return: ContainerServiceAgentPoolProfile
|
||||
'''
|
||||
return ContainerServiceAgentPoolProfile(
|
||||
name=agentpoolprofile['name'],
|
||||
count=agentpoolprofile['count'],
|
||||
dns_prefix=agentpoolprofile['dns_prefix'],
|
||||
vm_size=agentpoolprofile['vm_size']
|
||||
)
|
||||
|
||||
|
||||
def create_orch_platform_instance(orchestrator):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceOrchestratorProfile
|
||||
:param: orchestrator: dict with the parameters to setup the ContainerServiceOrchestratorProfile
|
||||
:return: ContainerServiceOrchestratorProfile
|
||||
'''
|
||||
return ContainerServiceOrchestratorProfile(
|
||||
orchestrator_type=orchestrator,
|
||||
)
|
||||
|
||||
|
||||
def create_service_principal_profile_instance(spnprofile):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceServicePrincipalProfile
|
||||
:param: spnprofile: dict with the parameters to setup the ContainerServiceServicePrincipalProfile
|
||||
:return: ContainerServiceServicePrincipalProfile
|
||||
'''
|
||||
return ContainerServiceServicePrincipalProfile(
|
||||
client_id=spnprofile[0]['client_id'],
|
||||
secret=spnprofile[0]['client_secret']
|
||||
)
|
||||
|
||||
|
||||
def create_linux_profile_instance(linuxprofile):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceLinuxProfile
|
||||
:param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile
|
||||
:return: ContainerServiceLinuxProfile
|
||||
'''
|
||||
return ContainerServiceLinuxProfile(
|
||||
admin_username=linuxprofile[0]['admin_username'],
|
||||
ssh=create_ssh_configuration_instance(linuxprofile[0]['ssh_key'])
|
||||
)
|
||||
|
||||
|
||||
def create_ssh_configuration_instance(sshconf):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceSshConfiguration
|
||||
:param: sshconf: dict with the parameters to setup the ContainerServiceSshConfiguration
|
||||
:return: ContainerServiceSshConfiguration
|
||||
'''
|
||||
listssh = []
|
||||
key = ContainerServiceSshPublicKey(key_data=str(sshconf))
|
||||
listssh.append(key)
|
||||
return ContainerServiceSshConfiguration(
|
||||
public_keys=listssh
|
||||
)
|
||||
|
||||
|
||||
def create_master_profile_instance(masterprofile):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceMasterProfile
|
||||
Note: first_consecutive_static_ip is deliberately set to None, because the Azure service rejects
request bodies that include this property. This appears to be an inconsistency between the Azure
client SDK and the Azure service.
|
||||
:param: masterprofile: dict with the parameters to setup the ContainerServiceMasterProfile
|
||||
:return: ContainerServiceMasterProfile
|
||||
'''
|
||||
return ContainerServiceMasterProfile(
|
||||
count=masterprofile[0]['count'],
|
||||
dns_prefix=masterprofile[0]['dns_prefix'],
|
||||
vm_size=masterprofile[0]['vm_size'],
|
||||
first_consecutive_static_ip=None
|
||||
)
|
||||
|
||||
|
||||
def create_diagnostics_profile_instance(diagprofile):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceDiagnosticsProfile
|
||||
:param: diagprofile: dict with the parameters to setup the ContainerServiceDiagnosticsProfile
|
||||
:return: ContainerServiceDiagnosticsProfile
|
||||
'''
|
||||
return ContainerServiceDiagnosticsProfile(
|
||||
vm_diagnostics=create_vm_diagnostics_instance(diagprofile)
|
||||
)
|
||||
|
||||
|
||||
def create_vm_diagnostics_instance(vmdiag):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceVMDiagnostics
|
||||
:param: vmdiag: dict with the parameters to setup the ContainerServiceVMDiagnostics
|
||||
:return: ContainerServiceVMDiagnostics
|
||||
'''
|
||||
return ContainerServiceVMDiagnostics(
|
||||
enabled=vmdiag
|
||||
)
|
||||
|
||||
|
||||
def create_acs_dict(acs):
|
||||
'''
|
||||
Helper method to deserialize a ContainerService to a dict
|
||||
:param: acs: ContainerService or AzureOperationPoller with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
service_principal_profile_dict = None
|
||||
if acs.orchestrator_profile.orchestrator_type == 'Kubernetes':
|
||||
service_principal_profile_dict = create_service_principal_profile_dict(acs.service_principal_profile)
|
||||
|
||||
return dict(
|
||||
id=acs.id,
|
||||
name=acs.name,
|
||||
location=acs.location,
|
||||
tags=acs.tags,
|
||||
orchestrator_profile=create_orchestrator_profile_dict(acs.orchestrator_profile),
|
||||
master_profile=create_master_profile_dict(acs.master_profile),
|
||||
linux_profile=create_linux_profile_dict(acs.linux_profile),
|
||||
service_principal_profile=service_principal_profile_dict,
|
||||
diagnostics_profile=create_diagnotstics_profile_dict(acs.diagnostics_profile),
|
||||
provisioning_state=acs.provisioning_state,
|
||||
agent_pool_profiles=create_agent_pool_profiles_dict(acs.agent_pool_profiles),
|
||||
type=acs.type
|
||||
)
|
||||
|
||||
|
||||
def create_linux_profile_dict(linuxprofile):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceLinuxProfile to a dict
|
||||
:param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return dict(
|
||||
ssh_key=linuxprofile.ssh.public_keys[0].key_data,
|
||||
admin_username=linuxprofile.admin_username
|
||||
)
|
||||
|
||||
|
||||
def create_master_profile_dict(masterprofile):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceMasterProfile to a dict
|
||||
:param: masterprofile: ContainerServiceMasterProfile with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return dict(
|
||||
count=masterprofile.count,
|
||||
fqdn=masterprofile.fqdn,
|
||||
vm_size=masterprofile.vm_size,
|
||||
dns_prefix=masterprofile.dns_prefix
|
||||
)
|
||||
|
||||
|
||||
def create_service_principal_profile_dict(serviceprincipalprofile):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict
|
||||
Note: For security reasons, the service principal secret is deliberately omitted.
|
||||
:param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return dict(
|
||||
client_id=serviceprincipalprofile.client_id
|
||||
)
|
||||
|
||||
|
||||
def create_diagnotstics_profile_dict(diagnosticsprofile):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceVMDiagnostics to a dict
|
||||
:param: diagnosticsprofile: ContainerServiceVMDiagnostics with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return dict(
|
||||
vm_diagnostics=diagnosticsprofile.vm_diagnostics.enabled
|
||||
)
|
||||
|
||||
|
||||
def create_orchestrator_profile_dict(orchestratorprofile):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceOrchestratorProfile to a dict
|
||||
:param: orchestratorprofile: ContainerServiceOrchestratorProfile with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return dict(
|
||||
orchestrator_type=str(orchestratorprofile.orchestrator_type)
|
||||
)
|
||||
|
||||
|
||||
def create_agent_pool_profiles_dict(agentpoolprofiles):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict
|
||||
:param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return [dict(
|
||||
count=profile.count,
|
||||
vm_size=profile.vm_size,
|
||||
name=profile.name,
|
||||
dns_prefix=profile.dns_prefix,
|
||||
fqdn=profile.fqdn
|
||||
) for profile in agentpoolprofiles]
|
||||
|
||||
|
||||
class AzureRMContainerService(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM container service resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
orchestration_platform=dict(
|
||||
type='str',
|
||||
required=True,
|
||||
choices=['DCOS', 'Kubernetes', 'Swarm']
|
||||
),
|
||||
master_profile=dict(
|
||||
type='list',
|
||||
required=True
|
||||
),
|
||||
linux_profile=dict(
|
||||
type='list',
|
||||
required=True
|
||||
),
|
||||
agent_pool_profiles=dict(
|
||||
type='list',
|
||||
required=True
|
||||
),
|
||||
service_principal=dict(
|
||||
type='list'
|
||||
),
|
||||
diagnostics_profile=dict(
|
||||
type='bool',
|
||||
required=True
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.location = None
|
||||
self.tags = None
|
||||
self.state = None
|
||||
self.orchestration_platform = None
|
||||
self.master_profile = None
|
||||
self.linux_profile = None
|
||||
self.agent_pool_profiles = None
|
||||
self.service_principal = None
|
||||
self.diagnostics_profile = None
|
||||
|
||||
self.results = dict(changed=False, state=dict())
|
||||
|
||||
super(AzureRMContainerService, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
resource_group = None
|
||||
response = None
|
||||
results = dict()
|
||||
to_be_updated = False
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
# Check if the ACS instance already present in the RG
|
||||
if self.state == 'present':
|
||||
|
||||
if self.orchestration_platform == 'Kubernetes':
|
||||
if not self.service_principal:
|
||||
self.fail('service_principal should be specified when using Kubernetes')
|
||||
if not self.service_principal[0].get('client_id'):
|
||||
self.fail('service_principal.client_id should be specified when using Kubernetes')
|
||||
if not self.service_principal[0].get('client_secret'):
|
||||
self.fail('service_principal.client_secret should be specified when using Kubernetes')
|
||||
|
||||
mastercount = self.master_profile[0].get('count')
|
||||
if mastercount != 1 and mastercount != 3 and mastercount != 5:
|
||||
self.fail('Invalid master count {0}: must be 1, 3 or 5'.format(mastercount))
|
||||
|
||||
# For now Agent Pool cannot be more than 1, just remove this part in the future if it change
|
||||
agentpoolcount = len(self.agent_pool_profiles)
|
||||
if agentpoolcount > 1:
|
||||
self.fail('You cannot specify more than one agent_pool_profile')
|
||||
|
||||
response = self.get_acs()
|
||||
self.results['state'] = response
|
||||
if not response:
|
||||
to_be_updated = True
|
||||
|
||||
else:
|
||||
self.log('Results : {0}'.format(response))
|
||||
update_tags, response['tags'] = self.update_tags(response['tags'])
|
||||
|
||||
if response['provisioning_state'] == "Succeeded":
|
||||
if update_tags:
|
||||
to_be_updated = True
|
||||
|
||||
def is_property_changed(profile, property, ignore_case=False):
|
||||
base = response[profile].get(property)
|
||||
new = getattr(self, profile)[0].get(property)
|
||||
if ignore_case:
|
||||
return base.lower() != new.lower()
|
||||
else:
|
||||
return base != new
|
||||
|
||||
# Cannot Update the master count for now // Uncomment this block in the future to support it
|
||||
if is_property_changed('master_profile', 'count'):
|
||||
# self.log(("Master Profile Count Diff, Was {0} / Now {1}"
|
||||
# .format(response['master_profile'].count,
|
||||
# self.master_profile[0].get('count'))))
|
||||
# to_be_updated = True
|
||||
self.module.warn("master_profile.count cannot be updated")
|
||||
|
||||
# Cannot Update the master vm_size for now. Could be a client SDK bug
|
||||
# Uncomment this block in the future to support it
|
||||
if is_property_changed('master_profile', 'vm_size', True):
|
||||
# self.log(("Master Profile VM Size Diff, Was {0} / Now {1}"
|
||||
# .format(response['master_profile'].get('vm_size'),
|
||||
# self.master_profile[0].get('vm_size'))))
|
||||
# to_be_updated = True
|
||||
self.module.warn("master_profile.vm_size cannot be updated")
|
||||
|
||||
# Cannot Update the SSH Key for now // Uncomment this block in the future to support it
|
||||
if is_property_changed('linux_profile', 'ssh_key'):
|
||||
# self.log(("Linux Profile Diff SSH, Was {0} / Now {1}"
|
||||
# .format(response['linux_profile'].ssh.public_keys[0].key_data,
|
||||
# self.linux_profile[0].get('ssh_key'))))
|
||||
# to_be_updated = True
|
||||
self.module.warn("linux_profile.ssh_key cannot be updated")
|
||||
|
||||
# self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username')))
|
||||
# self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username')))
|
||||
# Cannot Update the Username for now // Uncomment this block in the future to support it
|
||||
if is_property_changed('linux_profile', 'admin_username'):
|
||||
# self.log(("Linux Profile Diff User, Was {0} / Now {1}"
|
||||
# .format(response['linux_profile'].admin_username,
|
||||
# self.linux_profile[0].get('admin_username'))))
|
||||
# to_be_updated = True
|
||||
self.module.warn("linux_profile.admin_username cannot be updated")
|
||||
|
||||
# Cannot have more that one agent pool profile for now // Uncomment this block in the future to support it
|
||||
# if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles):
|
||||
# self.log("Agent Pool count is diff, need to updated")
|
||||
# to_be_updated = True
|
||||
|
||||
for profile_result in response['agent_pool_profiles']:
|
||||
matched = False
|
||||
for profile_self in self.agent_pool_profiles:
|
||||
if profile_result['name'] == profile_self['name']:
|
||||
matched = True
|
||||
if profile_result['count'] != profile_self['count'] or profile_result['vm_size'] != \
|
||||
profile_self['vm_size']:
|
||||
self.log(("Agent Profile Diff - Count was {0} / Now {1} - Vm_size was {2} / Now {3}"
|
||||
.format(profile_result['count'], profile_self['count'],
|
||||
profile_result['vm_size'], profile_self['vm_size'])))
|
||||
to_be_updated = True
|
||||
if not matched:
|
||||
self.log("Agent Pool not found")
|
||||
to_be_updated = True
|
||||
|
||||
if to_be_updated:
|
||||
self.log("Need to Create / Update the ACS instance")
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.results['state'] = self.create_update_acs()
|
||||
self.results['changed'] = True
|
||||
|
||||
self.log("Creation / Update done")
|
||||
elif self.state == 'absent':
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
self.delete_acs()
|
||||
self.log("ACS instance deleted")
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_acs(self):
|
||||
'''
|
||||
Creates or updates a container service with the specified configuration of orchestrator, masters, and agents.
|
||||
|
||||
:return: deserialized ACS instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the ACS instance {0}".format(self.name))
|
||||
|
||||
service_principal_profile = None
|
||||
agentpools = []
|
||||
|
||||
if self.agent_pool_profiles:
|
||||
for profile in self.agent_pool_profiles:
|
||||
self.log("Trying to push the following Profile {0}".format(profile))
|
||||
agentpools.append(create_agent_pool_profile_instance(profile))
|
||||
|
||||
if self.orchestration_platform == 'Kubernetes':
|
||||
service_principal_profile = create_service_principal_profile_instance(self.service_principal)
|
||||
|
||||
parameters = ContainerService(
|
||||
location=self.location,
|
||||
tags=self.tags,
|
||||
orchestrator_profile=create_orch_platform_instance(self.orchestration_platform),
|
||||
service_principal_profile=service_principal_profile,
|
||||
linux_profile=create_linux_profile_instance(self.linux_profile),
|
||||
master_profile=create_master_profile_instance(self.master_profile),
|
||||
agent_pool_profiles=agentpools,
|
||||
diagnostics_profile=create_diagnostics_profile_instance(self.diagnostics_profile)
|
||||
)
|
||||
|
||||
# self.log("orchestrator_profile : {0}".format(parameters.orchestrator_profile))
|
||||
# self.log("service_principal_profile : {0}".format(parameters.service_principal_profile))
|
||||
# self.log("linux_profile : {0}".format(parameters.linux_profile))
|
||||
# self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0]))
|
||||
# self.log("ssh : {0}".format(parameters.linux_profile.ssh))
|
||||
# self.log("master_profile : {0}".format(parameters.master_profile))
|
||||
# self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles))
|
||||
# self.log("vm_diagnostics : {0}".format(parameters.diagnostics_profile.vm_diagnostics))
|
||||
|
||||
try:
|
||||
poller = self.containerservice_client.container_services.create_or_update(self.resource_group, self.name,
|
||||
parameters)
|
||||
response = self.get_poller_result(poller)
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the ACS instance.')
|
||||
self.fail("Error creating the ACS instance: {0}".format(str(exc)))
|
||||
return create_acs_dict(response)
|
||||
|
||||
def delete_acs(self):
|
||||
'''
|
||||
Deletes the specified container service in the specified subscription and resource group.
|
||||
The operation does not delete other resources created as part of creating a container service,
|
||||
including storage accounts, VMs, and availability sets.
|
||||
All the other resources created with the container service are part of the same resource group and can be deleted individually.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the ACS instance {0}".format(self.name))
|
||||
try:
|
||||
poller = self.containerservice_client.container_services.delete(self.resource_group, self.name)
|
||||
self.get_poller_result(poller)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the ACS instance.')
|
||||
self.fail("Error deleting the ACS instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_acs(self):
|
||||
'''
|
||||
Gets the properties of the specified container service.
|
||||
|
||||
:return: deserialized ACS instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the ACS instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.containerservice_client.container_services.get(self.resource_group, self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("ACS instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the ACS instance.')
|
||||
if found is True:
|
||||
return create_acs_dict(response)
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMContainerService()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,841 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Sertac Ozercan, <seozerca@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_aks
|
||||
version_added: "2.6"
|
||||
short_description: Manage a managed Azure Container Service (AKS) instance
|
||||
description:
|
||||
- Create, update and delete a managed Azure Container Service (AKS) instance.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the managed Azure Container Services (AKS) exists or will be created.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the managed Azure Container Services (AKS) instance.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the AKS. Use C(present) to create or update an AKS and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
location:
|
||||
description:
|
||||
- Valid azure location. Defaults to location of the resource group.
|
||||
dns_prefix:
|
||||
description:
|
||||
- DNS prefix specified when creating the managed cluster.
|
||||
kubernetes_version:
|
||||
description:
|
||||
- Version of Kubernetes specified when creating the managed cluster.
|
||||
linux_profile:
|
||||
description:
|
||||
- The Linux profile suboptions.
|
||||
suboptions:
|
||||
admin_username:
|
||||
description:
|
||||
- The Admin Username for the cluster.
|
||||
required: true
|
||||
ssh_key:
|
||||
description:
|
||||
- The Public SSH Key used to access the cluster.
|
||||
required: true
|
||||
agent_pool_profiles:
|
||||
description:
|
||||
- The agent pool profile suboptions.
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Unique name of the agent pool profile in the context of the subscription and resource group.
|
||||
required: true
|
||||
count:
|
||||
description:
|
||||
- Number of agents (VMs) to host docker containers.
|
||||
- Allowed values must be in the range of C(1) to C(100) (inclusive).
|
||||
required: true
|
||||
vm_size:
|
||||
description:
|
||||
- The VM Size of each of the Agent Pool VM's (e.g. C(Standard_F1) / C(Standard_D2v2)).
|
||||
required: true
|
||||
vnet_subnet_id:
|
||||
description:
|
||||
- Id of subnet for Agent Pool VM's Network Interfaces
|
||||
os_disk_size_gb:
|
||||
description:
|
||||
- Size of the OS disk.
|
||||
service_principal:
|
||||
description:
|
||||
- The service principal suboptions.
|
||||
suboptions:
|
||||
client_id:
|
||||
description:
|
||||
- The ID for the Service Principal.
|
||||
required: true
|
||||
client_secret:
|
||||
description:
|
||||
- The secret password associated with the service principal.
|
||||
required: true
|
||||
enable_rbac:
|
||||
description:
|
||||
- Enable RBAC.
|
||||
- Existing non-RBAC enabled AKS clusters cannot currently be updated for RBAC use.
|
||||
type: bool
|
||||
default: no
|
||||
version_added: "2.8"
|
||||
network_profile:
|
||||
description:
|
||||
- Profile of network configuration.
|
||||
suboptions:
|
||||
network_plugin:
|
||||
description:
|
||||
- Network plugin used for building Kubernetes network.
|
||||
- This property cannot be changed.
|
||||
- With C(kubenet), nodes get an IP address from the Azure virtual network subnet.
|
||||
- AKS features such as Virtual Nodes or network policies aren't supported with C(kubenet).
|
||||
- C(azure) enables Azure Container Networking Interface (CNI); every pod gets an IP address from the subnet and can be accessed directly.
|
||||
default: kubenet
|
||||
choices:
|
||||
- azure
|
||||
- kubenet
|
||||
network_policy:
|
||||
description: Network policy used for building Kubernetes network.
|
||||
choices:
|
||||
- azure
|
||||
- calico
|
||||
pod_cidr:
|
||||
description:
|
||||
- A CIDR notation IP range from which to assign pod IPs when I(network_plugin=kubenet) is used.
|
||||
- It should be a large address space that isn't in use elsewhere in your network environment.
|
||||
- This address range must be large enough to accommodate the number of nodes that you expect to scale up to.
|
||||
default: "10.244.0.0/16"
|
||||
service_cidr:
|
||||
description:
|
||||
- A CIDR notation IP range from which to assign service cluster IPs.
|
||||
- It must not overlap with any Subnet IP ranges.
|
||||
- It should be the *.10 address of your service IP address range.
|
||||
default: "10.0.0.0/16"
|
||||
dns_service_ip:
|
||||
description:
|
||||
- An IP address assigned to the Kubernetes DNS service.
|
||||
- It must be within the Kubernetes service address range specified in serviceCidr.
|
||||
default: "10.0.0.10"
|
||||
docker_bridge_cidr:
|
||||
description:
|
||||
- A CIDR notation IP range assigned to the Docker bridge network.
|
||||
- It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
|
||||
default: "172.17.0.1/16"
|
||||
version_added: "2.8"
|
||||
aad_profile:
|
||||
description:
|
||||
- Profile of Azure Active Directory configuration.
|
||||
suboptions:
|
||||
client_app_id:
|
||||
description: The client AAD application ID.
|
||||
server_app_id:
|
||||
description: The server AAD application ID.
|
||||
server_app_secret:
|
||||
description: The server AAD application secret.
|
||||
tenant_id:
|
||||
description:
|
||||
- The AAD tenant ID to use for authentication.
|
||||
- If not specified, will use the tenant of the deployment subscription.
|
||||
version_added: "2.8"
|
||||
addon:
|
||||
description:
|
||||
- Profile of managed cluster add-on.
|
||||
- Key can be C(http_application_routing), C(monitoring), C(virtual_node).
|
||||
- Value must be a dict contains a bool variable C(enabled).
|
||||
type: dict
|
||||
suboptions:
|
||||
http_application_routing:
|
||||
description:
|
||||
- The HTTP application routing solution makes it easy to access applications that are deployed to your cluster.
|
||||
type: dict
|
||||
suboptions:
|
||||
enabled:
|
||||
description:
|
||||
- Whether the solution enabled.
|
||||
type: bool
|
||||
monitoring:
|
||||
description:
|
||||
- It gives you performance visibility by collecting memory and processor metrics from controllers, nodes,
|
||||
and containers that are available in Kubernetes through the Metrics API.
|
||||
type: dict
|
||||
suboptions:
|
||||
enabled:
|
||||
description:
|
||||
- Whether the solution enabled.
|
||||
type: bool
|
||||
log_analytics_workspace_resource_id:
|
||||
description:
|
||||
- Where to store the container metrics.
|
||||
virtual_node:
|
||||
description:
|
||||
- With virtual nodes, you have quick provisioning of pods, and only pay per second for their execution time.
|
||||
- You don't need to wait for Kubernetes cluster autoscaler to deploy VM compute nodes to run the additional pods.
|
||||
type: dict
|
||||
suboptions:
|
||||
enabled:
|
||||
description:
|
||||
- Whether the solution enabled.
|
||||
type: bool
|
||||
subnet_resource_id:
|
||||
description:
|
||||
- Subnet associated to the cluster.
|
||||
version_added: "2.8"
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Sertac Ozercan (@sozercan)
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a managed Azure Container Services (AKS) instance
|
||||
azure_rm_aks:
|
||||
name: myAKS
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
dns_prefix: akstest
|
||||
kubernetes_version: 1.14.6
|
||||
linux_profile:
|
||||
admin_username: azureuser
|
||||
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
|
||||
service_principal:
|
||||
client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948"
|
||||
client_secret: "mySPNp@ssw0rd!"
|
||||
agent_pool_profiles:
|
||||
- name: default
|
||||
count: 5
|
||||
vm_size: Standard_D2_v2
|
||||
tags:
|
||||
Environment: Production
|
||||
|
||||
- name: Remove a managed Azure Container Services (AKS) instance
|
||||
azure_rm_aks:
|
||||
name: myAKS
|
||||
resource_group: myResourceGroup
|
||||
state: absent
|
||||
'''
|
||||
RETURN = '''
|
||||
state:
|
||||
description: Current state of the Azure Container Service (AKS).
|
||||
returned: always
|
||||
type: dict
|
||||
example:
|
||||
agent_pool_profiles:
|
||||
- count: 1
|
||||
dns_prefix: Null
|
||||
name: default
|
||||
os_disk_size_gb: Null
|
||||
os_type: Linux
|
||||
ports: Null
|
||||
storage_profile: ManagedDisks
|
||||
vm_size: Standard_DS1_v2
|
||||
vnet_subnet_id: Null
|
||||
changed: false
|
||||
dns_prefix: aks9860bdcd89
|
||||
id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.ContainerService/managedClusters/aks9860bdc"
|
||||
kube_config: "......"
|
||||
kubernetes_version: 1.14.6
|
||||
linux_profile:
|
||||
admin_username: azureuser
|
||||
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADA.....
|
||||
location: eastus
|
||||
name: aks9860bdc
|
||||
provisioning_state: Succeeded
|
||||
service_principal_profile:
|
||||
client_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
tags: {}
|
||||
type: Microsoft.ContainerService/ManagedClusters
|
||||
'''
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def create_aks_dict(aks):
|
||||
'''
|
||||
Helper method to deserialize a ContainerService to a dict
|
||||
:param: aks: ContainerService or AzureOperationPoller with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
|
||||
return dict(
|
||||
id=aks.id,
|
||||
name=aks.name,
|
||||
location=aks.location,
|
||||
dns_prefix=aks.dns_prefix,
|
||||
kubernetes_version=aks.kubernetes_version,
|
||||
tags=aks.tags,
|
||||
linux_profile=create_linux_profile_dict(aks.linux_profile),
|
||||
service_principal_profile=create_service_principal_profile_dict(
|
||||
aks.service_principal_profile),
|
||||
provisioning_state=aks.provisioning_state,
|
||||
agent_pool_profiles=create_agent_pool_profiles_dict(
|
||||
aks.agent_pool_profiles),
|
||||
type=aks.type,
|
||||
kube_config=aks.kube_config,
|
||||
enable_rbac=aks.enable_rbac,
|
||||
network_profile=create_network_profiles_dict(aks.network_profile),
|
||||
aad_profile=create_aad_profiles_dict(aks.aad_profile),
|
||||
addon=create_addon_dict(aks.addon_profiles),
|
||||
fqdn=aks.fqdn,
|
||||
node_resource_group=aks.node_resource_group
|
||||
)
|
||||
|
||||
|
||||
def create_network_profiles_dict(network):
|
||||
return dict(
|
||||
network_plugin=network.network_plugin,
|
||||
network_policy=network.network_policy,
|
||||
pod_cidr=network.pod_cidr,
|
||||
service_cidr=network.service_cidr,
|
||||
dns_service_ip=network.dns_service_ip,
|
||||
docker_bridge_cidr=network.docker_bridge_cidr
|
||||
) if network else dict()
|
||||
|
||||
|
||||
def create_aad_profiles_dict(aad):
|
||||
return aad.as_dict() if aad else dict()
|
||||
|
||||
|
||||
def create_addon_dict(addon):
|
||||
result = dict()
|
||||
addon = addon or dict()
|
||||
for key in addon.keys():
|
||||
result[key] = addon[key].config
|
||||
result[key]['enabled'] = addon[key].enabled
|
||||
return result
|
||||
|
||||
|
||||
def create_linux_profile_dict(linuxprofile):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceLinuxProfile to a dict
|
||||
:param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return dict(
|
||||
ssh_key=linuxprofile.ssh.public_keys[0].key_data,
|
||||
admin_username=linuxprofile.admin_username
|
||||
)
|
||||
|
||||
|
||||
def create_service_principal_profile_dict(serviceprincipalprofile):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict
|
||||
Note: For security reasons, the service principal secret is deliberately omitted.
|
||||
:param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return dict(
|
||||
client_id=serviceprincipalprofile.client_id
|
||||
)
|
||||
|
||||
|
||||
def create_agent_pool_profiles_dict(agentpoolprofiles):
|
||||
'''
|
||||
Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict
|
||||
:param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object
|
||||
:return: dict with the state on Azure
|
||||
'''
|
||||
return [dict(
|
||||
count=profile.count,
|
||||
vm_size=profile.vm_size,
|
||||
name=profile.name,
|
||||
os_disk_size_gb=profile.os_disk_size_gb,
|
||||
storage_profile=profile.storage_profile,
|
||||
vnet_subnet_id=profile.vnet_subnet_id,
|
||||
os_type=profile.os_type
|
||||
) for profile in agentpoolprofiles] if agentpoolprofiles else None
|
||||
|
||||
|
||||
def create_addon_profiles_spec():
|
||||
'''
|
||||
Helper method to parse the ADDONS dictionary and generate the addon spec
|
||||
'''
|
||||
spec = dict()
|
||||
for key in ADDONS.keys():
|
||||
values = ADDONS[key]
|
||||
addon_spec = dict(
|
||||
enabled=dict(type='bool', default=True)
|
||||
)
|
||||
configs = values.get('config') or {}
|
||||
for item in configs.keys():
|
||||
addon_spec[item] = dict(type='str', aliases=[configs[item]], required=True)
|
||||
spec[key] = dict(type='dict', options=addon_spec, aliases=[values['name']])
|
||||
return spec
|
||||
|
||||
|
||||
ADDONS = {
|
||||
'http_application_routing': dict(name='httpApplicationRouting'),
|
||||
'monitoring': dict(name='omsagent', config={'log_analytics_workspace_resource_id': 'logAnalyticsWorkspaceResourceID'}),
|
||||
'virtual_node': dict(name='aciConnector', config={'subnet_resource_id': 'SubnetName'})
|
||||
}
|
||||
|
||||
|
||||
linux_profile_spec = dict(
|
||||
admin_username=dict(type='str', required=True),
|
||||
ssh_key=dict(type='str', required=True)
|
||||
)
|
||||
|
||||
|
||||
service_principal_spec = dict(
|
||||
client_id=dict(type='str', required=True),
|
||||
client_secret=dict(type='str', no_log=True)
|
||||
)
|
||||
|
||||
|
||||
agent_pool_profile_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
count=dict(type='int', required=True),
|
||||
vm_size=dict(type='str', required=True),
|
||||
os_disk_size_gb=dict(type='int'),
|
||||
dns_prefix=dict(type='str'),
|
||||
ports=dict(type='list', elements='int'),
|
||||
storage_profiles=dict(type='str', choices=[
|
||||
'StorageAccount', 'ManagedDisks']),
|
||||
vnet_subnet_id=dict(type='str'),
|
||||
os_type=dict(type='str', choices=['Linux', 'Windows'])
|
||||
)
|
||||
|
||||
|
||||
network_profile_spec = dict(
|
||||
network_plugin=dict(type='str', choices=['azure', 'kubenet']),
|
||||
network_policy=dict(type='str'),
|
||||
pod_cidr=dict(type='str'),
|
||||
service_cidr=dict(type='str'),
|
||||
dns_service_ip=dict(type='str'),
|
||||
docker_bridge_cidr=dict(type='str')
|
||||
)
|
||||
|
||||
|
||||
aad_profile_spec = dict(
|
||||
client_app_id=dict(type='str'),
|
||||
server_app_id=dict(type='str'),
|
||||
server_app_secret=dict(type='str', no_log=True),
|
||||
tenant_id=dict(type='str')
|
||||
)
|
||||
|
||||
|
||||
class AzureRMManagedCluster(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM container service (AKS) resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
dns_prefix=dict(
|
||||
type='str'
|
||||
),
|
||||
kubernetes_version=dict(
|
||||
type='str'
|
||||
),
|
||||
linux_profile=dict(
|
||||
type='dict',
|
||||
options=linux_profile_spec
|
||||
),
|
||||
agent_pool_profiles=dict(
|
||||
type='list',
|
||||
elements='dict',
|
||||
options=agent_pool_profile_spec
|
||||
),
|
||||
service_principal=dict(
|
||||
type='dict',
|
||||
options=service_principal_spec
|
||||
),
|
||||
enable_rbac=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
network_profile=dict(
|
||||
type='dict',
|
||||
options=network_profile_spec
|
||||
),
|
||||
aad_profile=dict(
|
||||
type='dict',
|
||||
options=aad_profile_spec
|
||||
),
|
||||
addon=dict(
|
||||
type='dict',
|
||||
options=create_addon_profiles_spec()
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.location = None
|
||||
self.dns_prefix = None
|
||||
self.kubernetes_version = None
|
||||
self.tags = None
|
||||
self.state = None
|
||||
self.linux_profile = None
|
||||
self.agent_pool_profiles = None
|
||||
self.service_principal = None
|
||||
self.enable_rbac = False
|
||||
self.network_profile = None
|
||||
self.aad_profile = None
|
||||
self.addon = None
|
||||
|
||||
required_if = [
|
||||
('state', 'present', [
|
||||
'dns_prefix', 'linux_profile', 'agent_pool_profiles', 'service_principal'])
|
||||
]
|
||||
|
||||
self.results = dict(changed=False)
|
||||
|
||||
super(AzureRMManagedCluster, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
resource_group = None
|
||||
to_be_updated = False
|
||||
update_tags = False
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
response = self.get_aks()
|
||||
|
||||
# Check if the AKS instance already present in the RG
|
||||
if self.state == 'present':
|
||||
# For now Agent Pool cannot be more than 1, just remove this part in the future if it change
|
||||
agentpoolcount = len(self.agent_pool_profiles)
|
||||
if agentpoolcount > 1:
|
||||
self.fail('You cannot specify more than one agent_pool_profiles currently')
|
||||
|
||||
available_versions = self.get_all_versions()
|
||||
if not response:
|
||||
to_be_updated = True
|
||||
if self.kubernetes_version not in available_versions.keys():
|
||||
self.fail("Unsupported kubernetes version. Expected one of {0} but got {1}".format(available_versions.keys(), self.kubernetes_version))
|
||||
else:
|
||||
self.results = response
|
||||
self.results['changed'] = False
|
||||
self.log('Results : {0}'.format(response))
|
||||
update_tags, response['tags'] = self.update_tags(response['tags'])
|
||||
|
||||
if response['provisioning_state'] == "Succeeded":
|
||||
|
||||
def is_property_changed(profile, property, ignore_case=False):
|
||||
base = response[profile].get(property)
|
||||
new = getattr(self, profile).get(property)
|
||||
if ignore_case:
|
||||
return base.lower() != new.lower()
|
||||
else:
|
||||
return base != new
|
||||
|
||||
# Cannot Update the SSH Key for now // Let service to handle it
|
||||
if is_property_changed('linux_profile', 'ssh_key'):
|
||||
self.log(("Linux Profile Diff SSH, Was {0} / Now {1}"
|
||||
.format(response['linux_profile']['ssh_key'], self.linux_profile.get('ssh_key'))))
|
||||
to_be_updated = True
|
||||
# self.module.warn("linux_profile.ssh_key cannot be updated")
|
||||
|
||||
# self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username')))
|
||||
# self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username')))
|
||||
# Cannot Update the Username for now // Let service to handle it
|
||||
if is_property_changed('linux_profile', 'admin_username'):
|
||||
self.log(("Linux Profile Diff User, Was {0} / Now {1}"
|
||||
.format(response['linux_profile']['admin_username'], self.linux_profile.get('admin_username'))))
|
||||
to_be_updated = True
|
||||
# self.module.warn("linux_profile.admin_username cannot be updated")
|
||||
|
||||
# Cannot have more that one agent pool profile for now
|
||||
if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles):
|
||||
self.log("Agent Pool count is diff, need to updated")
|
||||
to_be_updated = True
|
||||
|
||||
if response['kubernetes_version'] != self.kubernetes_version:
|
||||
upgrade_versions = available_versions.get(response['kubernetes_version']) or available_versions.keys()
|
||||
if upgrade_versions and self.kubernetes_version not in upgrade_versions:
|
||||
self.fail('Cannot upgrade kubernetes version to {0}, supported value are {1}'.format(self.kubernetes_version, upgrade_versions))
|
||||
to_be_updated = True
|
||||
|
||||
if response['enable_rbac'] != self.enable_rbac:
|
||||
to_be_updated = True
|
||||
|
||||
if self.network_profile:
|
||||
for key in self.network_profile.keys():
|
||||
original = response['network_profile'].get(key) or ''
|
||||
if self.network_profile[key] and self.network_profile[key].lower() != original.lower():
|
||||
to_be_updated = True
|
||||
|
||||
def compare_addon(origin, patch, config):
|
||||
if not patch:
|
||||
return True
|
||||
if not origin:
|
||||
return False
|
||||
if origin['enabled'] != patch['enabled']:
|
||||
return False
|
||||
config = config or dict()
|
||||
for key in config.keys():
|
||||
if origin.get(config[key]) != patch.get(key):
|
||||
return False
|
||||
return True
|
||||
|
||||
if self.addon:
|
||||
for key in ADDONS.keys():
|
||||
addon_name = ADDONS[key]['name']
|
||||
if not compare_addon(response['addon'].get(addon_name), self.addon.get(key), ADDONS[key].get('config')):
|
||||
to_be_updated = True
|
||||
|
||||
for profile_result in response['agent_pool_profiles']:
|
||||
matched = False
|
||||
for profile_self in self.agent_pool_profiles:
|
||||
if profile_result['name'] == profile_self['name']:
|
||||
matched = True
|
||||
os_disk_size_gb = profile_self.get('os_disk_size_gb') or profile_result['os_disk_size_gb']
|
||||
if profile_result['count'] != profile_self['count'] \
|
||||
or profile_result['vm_size'] != profile_self['vm_size'] \
|
||||
or profile_result['os_disk_size_gb'] != os_disk_size_gb \
|
||||
or profile_result['vnet_subnet_id'] != profile_self.get('vnet_subnet_id', profile_result['vnet_subnet_id']):
|
||||
self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self))))
|
||||
to_be_updated = True
|
||||
if not matched:
|
||||
self.log("Agent Pool not found")
|
||||
to_be_updated = True
|
||||
|
||||
if to_be_updated:
|
||||
self.log("Need to Create / Update the AKS instance")
|
||||
|
||||
if not self.check_mode:
|
||||
self.results = self.create_update_aks()
|
||||
self.log("Creation / Update done")
|
||||
|
||||
self.results['changed'] = True
|
||||
elif update_tags:
|
||||
self.log("Need to Update the AKS tags")
|
||||
|
||||
if not self.check_mode:
|
||||
self.results['tags'] = self.update_aks_tags()
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
elif self.state == 'absent' and response:
|
||||
self.log("Need to Delete the AKS instance")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_aks()
|
||||
|
||||
self.log("AKS instance deleted")
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_aks(self):
|
||||
'''
|
||||
Creates or updates a managed Azure container service (AKS) with the specified configuration of agents.
|
||||
|
||||
:return: deserialized AKS instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the AKS instance {0}".format(self.name))
|
||||
|
||||
agentpools = []
|
||||
|
||||
if self.agent_pool_profiles:
|
||||
agentpools = [self.create_agent_pool_profile_instance(profile) for profile in self.agent_pool_profiles]
|
||||
|
||||
service_principal_profile = self.create_service_principal_profile_instance(self.service_principal)
|
||||
|
||||
parameters = self.managedcluster_models.ManagedCluster(
|
||||
location=self.location,
|
||||
dns_prefix=self.dns_prefix,
|
||||
kubernetes_version=self.kubernetes_version,
|
||||
tags=self.tags,
|
||||
service_principal_profile=service_principal_profile,
|
||||
agent_pool_profiles=agentpools,
|
||||
linux_profile=self.create_linux_profile_instance(self.linux_profile),
|
||||
enable_rbac=self.enable_rbac,
|
||||
network_profile=self.create_network_profile_instance(self.network_profile),
|
||||
aad_profile=self.create_aad_profile_instance(self.aad_profile),
|
||||
addon_profiles=self.create_addon_profile_instance(self.addon)
|
||||
)
|
||||
|
||||
# self.log("service_principal_profile : {0}".format(parameters.service_principal_profile))
|
||||
# self.log("linux_profile : {0}".format(parameters.linux_profile))
|
||||
# self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0]))
|
||||
# self.log("ssh : {0}".format(parameters.linux_profile.ssh))
|
||||
# self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles))
|
||||
|
||||
try:
|
||||
poller = self.managedcluster_client.managed_clusters.create_or_update(self.resource_group, self.name, parameters)
|
||||
response = self.get_poller_result(poller)
|
||||
response.kube_config = self.get_aks_kubeconfig()
|
||||
return create_aks_dict(response)
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the AKS instance.')
|
||||
self.fail("Error creating the AKS instance: {0}".format(exc.message))
|
||||
|
||||
def update_aks_tags(self):
|
||||
try:
|
||||
poller = self.managedcluster_client.managed_clusters.update_tags(self.resource_group, self.name, self.tags)
|
||||
response = self.get_poller_result(poller)
|
||||
return response.tags
|
||||
except CloudError as exc:
|
||||
self.fail("Error attempting to update AKS tags: {0}".format(exc.message))
|
||||
|
||||
def delete_aks(self):
|
||||
'''
|
||||
Deletes the specified managed container service (AKS) in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the AKS instance {0}".format(self.name))
|
||||
try:
|
||||
poller = self.managedcluster_client.managed_clusters.delete(self.resource_group, self.name)
|
||||
self.get_poller_result(poller)
|
||||
return True
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the AKS instance.')
|
||||
self.fail("Error deleting the AKS instance: {0}".format(e.message))
|
||||
return False
|
||||
|
||||
def get_aks(self):
|
||||
'''
|
||||
Gets the properties of the specified container service.
|
||||
|
||||
:return: deserialized AKS instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the AKS instance {0} is present".format(self.name))
|
||||
try:
|
||||
response = self.managedcluster_client.managed_clusters.get(self.resource_group, self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("AKS instance : {0} found".format(response.name))
|
||||
response.kube_config = self.get_aks_kubeconfig()
|
||||
return create_aks_dict(response)
|
||||
except CloudError:
|
||||
self.log('Did not find the AKS instance.')
|
||||
return False
|
||||
|
||||
def get_all_versions(self):
|
||||
try:
|
||||
result = dict()
|
||||
response = self.containerservice_client.container_services.list_orchestrators(self.location, resource_type='managedClusters')
|
||||
orchestrators = response.orchestrators
|
||||
for item in orchestrators:
|
||||
result[item.orchestrator_version] = [x.orchestrator_version for x in item.upgrades] if item.upgrades else []
|
||||
return result
|
||||
except Exception as exc:
|
||||
self.fail('Error when getting AKS supported kubernetes version list for location {0} - {1}'.format(self.location, exc.message or str(exc)))
|
||||
|
||||
def get_aks_kubeconfig(self):
|
||||
'''
|
||||
Gets kubeconfig for the specified AKS instance.
|
||||
|
||||
:return: AKS instance kubeconfig
|
||||
'''
|
||||
access_profile = self.managedcluster_client.managed_clusters.get_access_profile(resource_group_name=self.resource_group,
|
||||
resource_name=self.name,
|
||||
role_name="clusterUser")
|
||||
return access_profile.kube_config.decode('utf-8')
|
||||
|
||||
def create_agent_pool_profile_instance(self, agentpoolprofile):
|
||||
'''
|
||||
Helper method to serialize a dict to a ManagedClusterAgentPoolProfile
|
||||
:param: agentpoolprofile: dict with the parameters to setup the ManagedClusterAgentPoolProfile
|
||||
:return: ManagedClusterAgentPoolProfile
|
||||
'''
|
||||
return self.managedcluster_models.ManagedClusterAgentPoolProfile(**agentpoolprofile)
|
||||
|
||||
def create_service_principal_profile_instance(self, spnprofile):
|
||||
'''
|
||||
Helper method to serialize a dict to a ManagedClusterServicePrincipalProfile
|
||||
:param: spnprofile: dict with the parameters to setup the ManagedClusterServicePrincipalProfile
|
||||
:return: ManagedClusterServicePrincipalProfile
|
||||
'''
|
||||
return self.managedcluster_models.ManagedClusterServicePrincipalProfile(
|
||||
client_id=spnprofile['client_id'],
|
||||
secret=spnprofile['client_secret']
|
||||
)
|
||||
|
||||
def create_linux_profile_instance(self, linuxprofile):
|
||||
'''
|
||||
Helper method to serialize a dict to a ContainerServiceLinuxProfile
|
||||
:param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile
|
||||
:return: ContainerServiceLinuxProfile
|
||||
'''
|
||||
return self.managedcluster_models.ContainerServiceLinuxProfile(
|
||||
admin_username=linuxprofile['admin_username'],
|
||||
ssh=self.managedcluster_models.ContainerServiceSshConfiguration(public_keys=[
|
||||
self.managedcluster_models.ContainerServiceSshPublicKey(key_data=str(linuxprofile['ssh_key']))])
|
||||
)
|
||||
|
||||
def create_network_profile_instance(self, network):
|
||||
return self.managedcluster_models.ContainerServiceNetworkProfile(**network) if network else None
|
||||
|
||||
def create_aad_profile_instance(self, aad):
|
||||
return self.managedcluster_models.ManagedClusterAADProfile(**aad) if aad else None
|
||||
|
||||
def create_addon_profile_instance(self, addon):
|
||||
result = dict()
|
||||
addon = addon or {}
|
||||
for key in addon.keys():
|
||||
if not ADDONS.get(key):
|
||||
self.fail('Unsupported addon {0}'.format(key))
|
||||
if addon.get(key):
|
||||
name = ADDONS[key]['name']
|
||||
config_spec = ADDONS[key].get('config') or dict()
|
||||
config = addon[key]
|
||||
for v in config_spec.keys():
|
||||
config[config_spec[v]] = config[v]
|
||||
result[name] = self.managedcluster_models.ManagedClusterAddonProfile(config=config, enabled=config['enabled'])
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMManagedCluster()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,191 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_aks_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get Azure Kubernetes Service facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific Azure Kubernetes Service or all Azure Kubernetes Services.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Limit results to a specific resource group.
|
||||
resource_group:
|
||||
description:
|
||||
- The resource group to search for the desired Azure Kubernetes Service
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
show_kubeconfig:
|
||||
description:
|
||||
- Show kubeconfig of the AKS cluster.
|
||||
- Note the operation will cost more network overhead, not recommended when listing AKS.
|
||||
version_added: "2.8"
|
||||
choices:
|
||||
- user
|
||||
- admin
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for one Azure Kubernetes Service
|
||||
azure_rm_aks_info:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts for all Azure Kubernetes Services
|
||||
azure_rm_aks_info:
|
||||
|
||||
- name: Get facts by tags
|
||||
azure_rm_aks_info:
|
||||
tags:
|
||||
- testing
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_aks:
|
||||
description: List of Azure Kubernetes Service dicts.
|
||||
returned: always
|
||||
type: list
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureHttpError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
AZURE_OBJECT_CLASS = 'managedClusters'
|
||||
|
||||
|
||||
class AzureRMManagedClusterInfo(AzureRMModuleBase):
|
||||
"""Utility class to get Azure Kubernetes Service facts"""
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_args = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
tags=dict(type='list'),
|
||||
show_kubeconfig=dict(type='str', choices=['user', 'admin']),
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
aks=[],
|
||||
available_versions=[]
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.tags = None
|
||||
self.show_kubeconfig = None
|
||||
|
||||
super(AzureRMManagedClusterInfo, self).__init__(
|
||||
derived_arg_spec=self.module_args,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_aks_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_aks_facts' module has been renamed to 'azure_rm_aks_info'", version='2.13')
|
||||
|
||||
for key in self.module_args:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.results['aks'] = (
|
||||
self.get_item() if self.name
|
||||
else self.list_items()
|
||||
)
|
||||
|
||||
return self.results
|
||||
|
||||
def get_item(self):
|
||||
"""Get a single Azure Kubernetes Service"""
|
||||
|
||||
self.log('Get properties for {0}'.format(self.name))
|
||||
|
||||
item = None
|
||||
result = []
|
||||
|
||||
try:
|
||||
item = self.managedcluster_client.managed_clusters.get(self.resource_group, self.name)
|
||||
except CloudError:
|
||||
pass
|
||||
|
||||
if item and self.has_tags(item.tags, self.tags):
|
||||
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
|
||||
if self.show_kubeconfig:
|
||||
result[0]['kube_config'] = self.get_aks_kubeconfig(self.resource_group, self.name)
|
||||
|
||||
return result
|
||||
|
||||
def list_items(self):
|
||||
"""Get all Azure Kubernetes Services"""
|
||||
|
||||
self.log('List all Azure Kubernetes Services')
|
||||
|
||||
try:
|
||||
response = self.managedcluster_client.managed_clusters.list(self.resource_group)
|
||||
except AzureHttpError as exc:
|
||||
self.fail('Failed to list all items - {0}'.format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
item_dict = self.serialize_obj(item, AZURE_OBJECT_CLASS)
|
||||
if self.show_kubeconfig:
|
||||
item_dict['kube_config'] = self.get_aks_kubeconfig(self.resource_group, item.name)
|
||||
results.append(item_dict)
|
||||
|
||||
return results
|
||||
|
||||
def get_aks_kubeconfig(self, resource_group, name):
|
||||
'''
|
||||
Gets kubeconfig for the specified AKS instance.
|
||||
|
||||
:return: AKS instance kubeconfig
|
||||
'''
|
||||
if not self.show_kubeconfig:
|
||||
return ''
|
||||
role_name = 'cluster{0}'.format(str.capitalize(self.show_kubeconfig))
|
||||
access_profile = self.managedcluster_client.managed_clusters.get_access_profile(resource_group, name, role_name)
|
||||
return access_profile.kube_config.decode('utf-8')
|
||||
|
||||
|
||||
def main():
|
||||
"""Main module execution code path"""
|
||||
|
||||
AzureRMManagedClusterInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,133 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_aksversion_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get available kubernetes versions supported by Azure Kubernetes Service
|
||||
|
||||
description:
|
||||
- Get available kubernetes versions supported by Azure Kubernetes Service.
|
||||
|
||||
options:
|
||||
location:
|
||||
description:
|
||||
- Get the versions available for creating a managed Kubernetes cluster.
|
||||
required: true
|
||||
version:
|
||||
description:
|
||||
- Get the upgrade versions available for a managed Kubernetes cluster version.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get available versions for AKS in location eastus
|
||||
azure_rm_aksversion_info:
|
||||
location: eastus
|
||||
- name: Get available versions an AKS can be upgrade to
|
||||
azure_rm_aksversion_info:
|
||||
location: eastis
|
||||
version: 1.11.6
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_aks_versions:
|
||||
description: List of supported kubernetes versions.
|
||||
returned: always
|
||||
type: list
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureHttpError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMAKSVersion(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_args = dict(
|
||||
location=dict(type='str', required=True),
|
||||
version=dict(type='str')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
azure_aks_versions=[]
|
||||
)
|
||||
|
||||
self.location = None
|
||||
self.version = None
|
||||
|
||||
super(AzureRMAKSVersion, self).__init__(
|
||||
derived_arg_spec=self.module_args,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_aksversion_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_aksversion_facts' module has been renamed to 'azure_rm_aksversion_info'", version='2.13')
|
||||
|
||||
for key in self.module_args:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.results['azure_aks_versions'] = self.get_all_versions(self.location, self.version)
|
||||
|
||||
return self.results
|
||||
|
||||
def get_all_versions(self, location, version):
|
||||
'''
|
||||
Get all kubernetes version supported by AKS
|
||||
:return: ordered version list
|
||||
'''
|
||||
try:
|
||||
result = dict()
|
||||
response = self.containerservice_client.container_services.list_orchestrators(self.location, resource_type='managedClusters')
|
||||
orchestrators = response.orchestrators
|
||||
for item in orchestrators:
|
||||
result[item.orchestrator_version] = [x.orchestrator_version for x in item.upgrades] if item.upgrades else []
|
||||
if version:
|
||||
return result.get(version) or []
|
||||
else:
|
||||
keys = list(result.keys())
|
||||
keys.sort()
|
||||
return keys
|
||||
except Exception as exc:
|
||||
self.fail('Error when getting AKS supported kubernetes version list for location {0} - {1}'.format(self.location, exc.message or str(exc)))
|
||||
|
||||
|
||||
def main():
|
||||
"""Main module execution code path"""
|
||||
|
||||
AzureRMAKSVersion()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
File diff suppressed because it is too large
Load Diff
@ -1,250 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_applicationsecuritygroup
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure Application Security Group
|
||||
description:
|
||||
- Create, update and delete instance of Azure Application Security Group.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the application security group.
|
||||
required: True
|
||||
location:
|
||||
description:
|
||||
- Resource location. If not set, location from the resource group will be used as default.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Application Security Group.
|
||||
- Use C(present) to create or update an Application Security Group and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yunge Zhu (@yungezz)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create application security group
|
||||
azure_rm_applicationsecuritygroup:
|
||||
resource_group: myResourceGroup
|
||||
name: mySecurityGroup
|
||||
location: eastus
|
||||
tags:
|
||||
foo: bar
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource id of the application security group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationSecurityGroups/
|
||||
mySecurityGroup"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, CreateOrUpdate, Delete = range(3)
|
||||
|
||||
|
||||
class AzureRMApplicationSecurityGroup(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Application Security Group resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.location = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
|
||||
self.state = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMApplicationSecurityGroup, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
old_response = self.get_applicationsecuritygroup()
|
||||
|
||||
if not old_response:
|
||||
self.log("Application Security Group instance doesn't exist")
|
||||
if self.state == 'present':
|
||||
self.to_do = Actions.CreateOrUpdate
|
||||
else:
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.log("Application Security Group instance already exists")
|
||||
if self.state == 'present':
|
||||
if self.check_update(old_response):
|
||||
self.to_do = Actions.CreateOrUpdate
|
||||
|
||||
update_tags, self.tags = self.update_tags(old_response.get('tags', None))
|
||||
if update_tags:
|
||||
self.to_do = Actions.CreateOrUpdate
|
||||
|
||||
elif self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
|
||||
if self.to_do == Actions.CreateOrUpdate:
|
||||
self.log("Need to Create / Update the Application Security Group instance")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
response = self.create_update_applicationsecuritygroup()
|
||||
self.results['id'] = response['id']
|
||||
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Delete Application Security Group instance")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_applicationsecuritygroup()
|
||||
|
||||
return self.results
|
||||
|
||||
def check_update(self, existing_asg):
|
||||
if self.location and self.location.lower() != existing_asg['location'].lower():
|
||||
self.module.warn("location cannot be updated. Existing {0}, input {1}".format(existing_asg['location'], self.location))
|
||||
return False
|
||||
|
||||
def create_update_applicationsecuritygroup(self):
|
||||
'''
|
||||
Create or update Application Security Group.
|
||||
|
||||
:return: deserialized Application Security Group instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Application Security Group instance {0}".format(self.name))
|
||||
|
||||
param = dict(name=self.name,
|
||||
tags=self.tags,
|
||||
location=self.location)
|
||||
try:
|
||||
response = self.network_client.application_security_groups.create_or_update(resource_group_name=self.resource_group,
|
||||
application_security_group_name=self.name,
|
||||
parameters=param)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error creating/updating Application Security Group instance.')
|
||||
self.fail("Error creating/updating Application Security Group instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_applicationsecuritygroup(self):
|
||||
'''
|
||||
Deletes specified Application Security Group instance.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Application Security Group instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.network_client.application_security_groups.delete(resource_group_name=self.resource_group,
|
||||
application_security_group_name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error deleting the Application Security Group instance.')
|
||||
self.fail("Error deleting the Application Security Group instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_applicationsecuritygroup(self):
|
||||
'''
|
||||
Gets the properties of the specified Application Security Group.
|
||||
|
||||
:return: deserialized Application Security Group instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Application Security Group instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.network_client.application_security_groups.get(resource_group_name=self.resource_group,
|
||||
application_security_group_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Application Security Group instance : {0} found".format(response.name))
|
||||
return response.as_dict()
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Application Security Group instance.')
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMApplicationSecurityGroup()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,229 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_applicationsecuritygroup_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Application Security Group facts
|
||||
description:
|
||||
- Get facts of Azure Application Security Group.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
name:
|
||||
description:
|
||||
- The name of the application security group.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yunge Zhu (@yungezz)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List application security groups in specific resource group
|
||||
azure_rm_applicationsecuritygroup_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: List application security groups in specific subscription
|
||||
azure_rm_applicationsecuritygroup_info:
|
||||
|
||||
- name: Get application security group by name
|
||||
azure_rm_applicationsecuritygroup_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myApplicationSecurityGroup
|
||||
tags:
|
||||
- foo
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
applicationsecuritygroups:
|
||||
description:
|
||||
- List of application security groups.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description: Id of the application security group.
|
||||
type: str
|
||||
returned: always
|
||||
sample:
|
||||
"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationSecurityGroups/MyAsg"
|
||||
location:
|
||||
description:
|
||||
- Location of the application security group.
|
||||
type: str
|
||||
returned: always
|
||||
sample: eastus
|
||||
name:
|
||||
description:
|
||||
- Name of the resource.
|
||||
type: str
|
||||
returned: always
|
||||
sample: myAsg
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of application security group.
|
||||
type: str
|
||||
returned: always
|
||||
sample: Succeeded
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def applicationsecuritygroup_to_dict(asg):
|
||||
return dict(
|
||||
id=asg.id,
|
||||
location=asg.location,
|
||||
name=asg.name,
|
||||
tags=asg.tags,
|
||||
provisioning_state=asg.provisioning_state
|
||||
)
|
||||
|
||||
|
||||
class AzureRMApplicationSecurityGroupInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str'
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
|
||||
super(AzureRMApplicationSecurityGroupInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=False,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_applicationsecuritygroup_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_applicationsecuritygroup_facts' module has been renamed to 'azure_rm_applicationsecuritygroup_info'",
|
||||
version='2.13')
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name:
|
||||
if self.resource_group:
|
||||
self.results['applicationsecuritygroups'] = self.get()
|
||||
else:
|
||||
self.fail("resource_group is required when filtering by name")
|
||||
elif self.resource_group:
|
||||
self.results['applicationsecuritygroups'] = self.list_by_resource_group()
|
||||
else:
|
||||
self.results['applicationsecuritygroups'] = self.list_all()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
'''
|
||||
Gets the properties of the specified Application Security Group.
|
||||
|
||||
:return: deserialized Application Security Group instance state dictionary
|
||||
'''
|
||||
self.log("Get the Application Security Group instance {0}".format(self.name))
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = self.network_client.application_security_groups.get(resource_group_name=self.resource_group,
|
||||
application_security_group_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(applicationsecuritygroup_to_dict(response))
|
||||
except CloudError as e:
|
||||
self.fail('Did not find the Application Security Group instance.')
|
||||
return results
|
||||
|
||||
def list_by_resource_group(self):
|
||||
'''
|
||||
Lists the properties of Application Security Groups in specific resource group.
|
||||
|
||||
:return: deserialized Application Security Group instance state dictionary
|
||||
'''
|
||||
self.log("Get the Application Security Groups in resource group {0}".format(self.resource_group))
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = list(self.network_client.application_security_groups.list(resource_group_name=self.resource_group))
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(applicationsecuritygroup_to_dict(item))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Application Security Group instance.')
|
||||
return results
|
||||
|
||||
def list_all(self):
|
||||
'''
|
||||
Lists the properties of Application Security Groups in specific subscription.
|
||||
|
||||
:return: deserialized Application Security Group instance state dictionary
|
||||
'''
|
||||
self.log("Get the Application Security Groups in current subscription")
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = list(self.network_client.application_security_groups.list_all())
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(applicationsecuritygroup_to_dict(item))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Application Security Group instance.')
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMApplicationSecurityGroupInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,379 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_appserviceplan
|
||||
version_added: "2.7"
|
||||
short_description: Manage App Service Plan
|
||||
description:
|
||||
- Create, update and delete instance of App Service Plan.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group to which the resource belongs.
|
||||
required: True
|
||||
|
||||
name:
|
||||
description:
|
||||
- Unique name of the app service plan to create or update.
|
||||
required: True
|
||||
|
||||
location:
|
||||
description:
|
||||
- Resource location. If not set, location from the resource group will be used as default.
|
||||
|
||||
sku:
|
||||
description:
|
||||
- The pricing tiers, e.g., C(F1), C(D1), C(B1), C(B2), C(B3), C(S1), C(P1), C(P1V2) etc.
|
||||
- Please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/plans/) for more detail.
|
||||
- For Linux app service plan, please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/) for more detail.
|
||||
is_linux:
|
||||
description:
|
||||
- Describe whether to host webapp on Linux worker.
|
||||
type: bool
|
||||
default: false
|
||||
|
||||
number_of_workers:
|
||||
description:
|
||||
- Describe number of workers to be allocated.
|
||||
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the app service plan.
|
||||
- Use C(present) to create or update an app service plan and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yunge Zhu (@yungezz)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a windows app service plan
|
||||
azure_rm_appserviceplan:
|
||||
resource_group: myResourceGroup
|
||||
name: myAppPlan
|
||||
location: eastus
|
||||
sku: S1
|
||||
|
||||
- name: Create a linux app service plan
|
||||
azure_rm_appserviceplan:
|
||||
resource_group: myResourceGroup
|
||||
name: myAppPlan
|
||||
location: eastus
|
||||
sku: S1
|
||||
is_linux: true
|
||||
number_of_workers: 1
|
||||
|
||||
- name: update sku of existing windows app service plan
|
||||
azure_rm_appserviceplan:
|
||||
resource_group: myResourceGroup
|
||||
name: myAppPlan
|
||||
location: eastus
|
||||
sku: S2
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_appserviceplan:
|
||||
description: Facts about the current state of the app service plan.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppPlan"
|
||||
}
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from msrest.serialization import Model
|
||||
from azure.mgmt.web.models import (
|
||||
app_service_plan, AppServicePlan, SkuDescription
|
||||
)
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def _normalize_sku(sku):
|
||||
if sku is None:
|
||||
return sku
|
||||
|
||||
sku = sku.upper()
|
||||
if sku == 'FREE':
|
||||
return 'F1'
|
||||
elif sku == 'SHARED':
|
||||
return 'D1'
|
||||
return sku
|
||||
|
||||
|
||||
def get_sku_name(tier):
|
||||
tier = tier.upper()
|
||||
if tier == 'F1' or tier == "FREE":
|
||||
return 'FREE'
|
||||
elif tier == 'D1' or tier == "SHARED":
|
||||
return 'SHARED'
|
||||
elif tier in ['B1', 'B2', 'B3', 'BASIC']:
|
||||
return 'BASIC'
|
||||
elif tier in ['S1', 'S2', 'S3']:
|
||||
return 'STANDARD'
|
||||
elif tier in ['P1', 'P2', 'P3']:
|
||||
return 'PREMIUM'
|
||||
elif tier in ['P1V2', 'P2V2', 'P3V2']:
|
||||
return 'PREMIUMV2'
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def appserviceplan_to_dict(plan):
|
||||
return dict(
|
||||
id=plan.id,
|
||||
name=plan.name,
|
||||
kind=plan.kind,
|
||||
location=plan.location,
|
||||
reserved=plan.reserved,
|
||||
is_linux=plan.reserved,
|
||||
provisioning_state=plan.provisioning_state,
|
||||
status=plan.status,
|
||||
target_worker_count=plan.target_worker_count,
|
||||
sku=dict(
|
||||
name=plan.sku.name,
|
||||
size=plan.sku.size,
|
||||
tier=plan.sku.tier,
|
||||
family=plan.sku.family,
|
||||
capacity=plan.sku.capacity
|
||||
),
|
||||
resource_group=plan.resource_group,
|
||||
number_of_sites=plan.number_of_sites,
|
||||
tags=plan.tags if plan.tags else None
|
||||
)
|
||||
|
||||
|
||||
class AzureRMAppServicePlans(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM App Service Plan resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
sku=dict(
|
||||
type='str'
|
||||
),
|
||||
is_linux=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
number_of_workers=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.location = None
|
||||
|
||||
self.sku = None
|
||||
self.is_linux = None
|
||||
self.number_of_workers = 1
|
||||
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
ansible_facts=dict(azure_appserviceplan=None)
|
||||
)
|
||||
self.state = None
|
||||
|
||||
super(AzureRMAppServicePlans, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if kwargs[key]:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
to_be_updated = False
|
||||
|
||||
# set location
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
# get app service plan
|
||||
old_response = self.get_plan()
|
||||
|
||||
# if not existing
|
||||
if not old_response:
|
||||
self.log("App Service plan doesn't exist")
|
||||
|
||||
if self.state == "present":
|
||||
to_be_updated = True
|
||||
|
||||
if not self.sku:
|
||||
self.fail('Please specify sku in plan when creation')
|
||||
|
||||
else:
|
||||
# existing app service plan, do update
|
||||
self.log("App Service Plan already exists")
|
||||
|
||||
if self.state == 'present':
|
||||
self.log('Result: {0}'.format(old_response))
|
||||
|
||||
update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
|
||||
|
||||
if update_tags:
|
||||
to_be_updated = True
|
||||
self.tags = newtags
|
||||
|
||||
# check if sku changed
|
||||
if self.sku and _normalize_sku(self.sku) != old_response['sku']['size']:
|
||||
to_be_updated = True
|
||||
|
||||
# check if number_of_workers changed
|
||||
if self.number_of_workers and int(self.number_of_workers) != old_response['sku']['capacity']:
|
||||
to_be_updated = True
|
||||
|
||||
if self.is_linux and self.is_linux != old_response['reserved']:
|
||||
self.fail("Operation not allowed: cannot update reserved of app service plan.")
|
||||
|
||||
if old_response:
|
||||
self.results['id'] = old_response['id']
|
||||
|
||||
if to_be_updated:
|
||||
self.log('Need to Create/Update app service plan')
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
response = self.create_or_update_plan()
|
||||
self.results['id'] = response['id']
|
||||
|
||||
if self.state == 'absent' and old_response:
|
||||
self.log("Delete app service plan")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_plan()
|
||||
|
||||
self.log('App service plan instance deleted')
|
||||
|
||||
return self.results
|
||||
|
||||
def get_plan(self):
|
||||
'''
|
||||
Gets app service plan
|
||||
:return: deserialized app service plan dictionary
|
||||
'''
|
||||
self.log("Get App Service Plan {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.web_client.app_service_plans.get(self.resource_group, self.name)
|
||||
if response:
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("App Service Plan : {0} found".format(response.name))
|
||||
|
||||
return appserviceplan_to_dict(response)
|
||||
except CloudError as ex:
|
||||
self.log("Didn't find app service plan {0} in resource group {1}".format(self.name, self.resource_group))
|
||||
|
||||
return False
|
||||
|
||||
def create_or_update_plan(self):
|
||||
'''
|
||||
Creates app service plan
|
||||
:return: deserialized app service plan dictionary
|
||||
'''
|
||||
self.log("Create App Service Plan {0}".format(self.name))
|
||||
|
||||
try:
|
||||
# normalize sku
|
||||
sku = _normalize_sku(self.sku)
|
||||
|
||||
sku_def = SkuDescription(tier=get_sku_name(
|
||||
sku), name=sku, capacity=self.number_of_workers)
|
||||
plan_def = AppServicePlan(
|
||||
location=self.location, app_service_plan_name=self.name, sku=sku_def, reserved=self.is_linux, tags=self.tags if self.tags else None)
|
||||
|
||||
response = self.web_client.app_service_plans.create_or_update(self.resource_group, self.name, plan_def)
|
||||
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
return appserviceplan_to_dict(response)
|
||||
except CloudError as ex:
|
||||
self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
|
||||
|
||||
def delete_plan(self):
|
||||
'''
|
||||
Deletes specified App service plan in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the App service plan {0}".format(self.name))
|
||||
try:
|
||||
response = self.web_client.app_service_plans.delete(resource_group_name=self.resource_group,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete App service plan.')
|
||||
self.fail(
|
||||
"Error deleting the App service plan : {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMAppServicePlans()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,241 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_appserviceplan_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get azure app service plan facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific app service plan or all app service plans in a resource group, or all app service plan in current subscription.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Only show results for a specific app service plan.
|
||||
resource_group:
|
||||
description:
|
||||
- Limit results by resource group.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yunge Zhu (@yungezz)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for app service plan by name
|
||||
azure_rm_appserviceplan_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myAppServicePlan
|
||||
|
||||
- name: Get azure_rm_appserviceplan_facts for app service plan in resource group
|
||||
azure_rm_appserviceplan_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts for app service plan with tags
|
||||
azure_rm_appserviceplan_info:
|
||||
tags:
|
||||
- testtag
|
||||
- foo:bar
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
appserviceplans:
|
||||
description: List of app service plans.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description: Id of the app service plan.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myPlan
|
||||
name:
|
||||
description: Name of the app service plan.
|
||||
returned: always
|
||||
type: str
|
||||
resource_group:
|
||||
description: Resource group of the app service plan.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
location:
|
||||
description: Location of the app service plan.
|
||||
returned: always
|
||||
type: str
|
||||
kind:
|
||||
description: Kind of the app service plan.
|
||||
returned: always
|
||||
type: str
|
||||
sample: app
|
||||
sku:
|
||||
description: Sku of the app service plan.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
name:
|
||||
description: Name of sku.
|
||||
returned: always
|
||||
type: str
|
||||
sample: S1
|
||||
family:
|
||||
description: Family of sku.
|
||||
returned: always
|
||||
type: str
|
||||
sample: S
|
||||
size:
|
||||
description: Size of sku.
|
||||
returned: always
|
||||
type: str
|
||||
sample: S1
|
||||
tier:
|
||||
description: Tier of sku.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Standard
|
||||
capacity:
|
||||
description: Capacity of sku.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 1
|
||||
'''
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
AZURE_OBJECT_CLASS = 'AppServicePlan'
|
||||
|
||||
|
||||
class AzureRMAppServicePlanInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.results = dict(changed=False)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.tags = None
|
||||
self.info_level = None
|
||||
|
||||
super(AzureRMAppServicePlanInfo, self).__init__(self.module_arg_spec,
|
||||
supports_tags=False,
|
||||
facts_module=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_appserviceplan_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_appserviceplan_facts' module has been renamed to 'azure_rm_appserviceplan_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name:
|
||||
self.results['appserviceplans'] = self.list_by_name()
|
||||
elif self.resource_group:
|
||||
self.results['appserviceplans'] = self.list_by_resource_group()
|
||||
else:
|
||||
self.results['appserviceplans'] = self.list_all()
|
||||
|
||||
return self.results
|
||||
|
||||
def list_by_name(self):
|
||||
self.log('Get app service plan {0}'.format(self.name))
|
||||
item = None
|
||||
result = []
|
||||
|
||||
try:
|
||||
item = self.web_client.app_service_plans.get(self.resource_group, self.name)
|
||||
except CloudError:
|
||||
pass
|
||||
|
||||
if item and self.has_tags(item.tags, self.tags):
|
||||
curated_result = self.construct_curated_plan(item)
|
||||
result = [curated_result]
|
||||
|
||||
return result
|
||||
|
||||
def list_by_resource_group(self):
|
||||
self.log('List app service plans in resource groups {0}'.format(self.resource_group))
|
||||
try:
|
||||
response = list(self.web_client.app_service_plans.list_by_resource_group(self.resource_group))
|
||||
except CloudError as exc:
|
||||
self.fail("Error listing app service plan in resource groups {0} - {1}".format(self.resource_group, str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
curated_output = self.construct_curated_plan(item)
|
||||
results.append(curated_output)
|
||||
return results
|
||||
|
||||
def list_all(self):
|
||||
self.log('List app service plans in current subscription')
|
||||
try:
|
||||
response = list(self.web_client.app_service_plans.list())
|
||||
except CloudError as exc:
|
||||
self.fail("Error listing app service plans: {0}".format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
curated_output = self.construct_curated_plan(item)
|
||||
results.append(curated_output)
|
||||
return results
|
||||
|
||||
def construct_curated_plan(self, plan):
|
||||
plan_facts = self.serialize_obj(plan, AZURE_OBJECT_CLASS)
|
||||
|
||||
curated_output = dict()
|
||||
curated_output['id'] = plan_facts['id']
|
||||
curated_output['name'] = plan_facts['name']
|
||||
curated_output['resource_group'] = plan_facts['properties']['resourceGroup']
|
||||
curated_output['location'] = plan_facts['location']
|
||||
curated_output['tags'] = plan_facts.get('tags', None)
|
||||
curated_output['is_linux'] = False
|
||||
curated_output['kind'] = plan_facts['kind']
|
||||
curated_output['sku'] = plan_facts['sku']
|
||||
|
||||
if plan_facts['properties'].get('reserved', None):
|
||||
curated_output['is_linux'] = True
|
||||
|
||||
return curated_output
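# Illustrative sketch (not part of the original module; values are hypothetical,
# taken from the RETURN samples above): construct_curated_plan() returns a dict like
#   {'id': '/subscriptions/.../serverfarms/myPlan', 'name': 'myPlan',
#    'resource_group': 'myResourceGroup', 'location': 'eastus', 'tags': None,
#    'kind': 'app', 'is_linux': False,
#    'sku': {'name': 'S1', 'tier': 'Standard', 'size': 'S1', 'family': 'S', 'capacity': 1}}
# where is_linux is True only when the plan's 'reserved' property is set.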
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMAppServicePlanInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,174 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_automationaccount
|
||||
version_added: "2.9"
|
||||
short_description: Manage Azure Automation account
|
||||
description:
|
||||
- Create, delete an Azure Automation account.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
type: str
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the automation account.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- State of the automation account. Use C(present) to create or update an automation account and C(absent) to delete an automation account.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
location:
|
||||
description:
|
||||
- Location of the resource.
|
||||
- If not specified, use resource group location.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create an automation account
|
||||
azure_rm_automationaccount:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Create an automation account
|
||||
azure_rm_automationaccount:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
location: eastus
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Automation account resource ID.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Automation/automationAccounts/Testing"
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
|
||||
class AzureRMAutomationAccount(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
location=dict(type='str')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.location = None
|
||||
|
||||
super(AzureRMAutomationAccount, self).__init__(self.module_arg_spec, supports_check_mode=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
account = self.get_account()
|
||||
changed = False
|
||||
if self.state == 'present':
|
||||
if not account:
|
||||
if not self.location:
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
self.location = resource_group.location
|
||||
param = self.automation_models.AutomationAccountCreateOrUpdateParameters(
|
||||
location=self.location,
|
||||
sku=self.automation_models.Sku(name='Basic'),
|
||||
tags=self.tags
|
||||
)
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
account = self.create_or_update(param)
|
||||
elif self.tags:
|
||||
update_tags, tags = self.update_tags(account.tags)
|
||||
if update_tags:
|
||||
changed = True
|
||||
param = self.automation_models.AutomationAccountUpdateParameters(
|
||||
tags=tags
|
||||
)
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
self.update_account_tags(param)
|
||||
if account:
|
||||
self.results['id'] = account.id
|
||||
elif account:
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
self.delete_account()
|
||||
self.results['changed'] = changed
|
||||
return self.results
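# Flow summary (sketch of the logic above, not additional behaviour): with
# state=present a missing account is created with a Basic SKU in the resource
# group's location, while an existing account only has its tags patched via
# AutomationAccountUpdateParameters; with state=absent an existing account is
# deleted. In check mode `changed` is reported without calling the API.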
|
||||
|
||||
def get_account(self):
|
||||
try:
|
||||
return self.automation_client.automation_account.get(self.resource_group, self.name)
|
||||
except self.automation_models.ErrorResponseException:
|
||||
pass
|
||||
|
||||
def create_or_update(self, param):
|
||||
try:
|
||||
return self.automation_client.automation_account.create_or_update(self.resource_group, self.name, param)
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when creating automation account {0}: {1}'.format(self.name, exc.message))
|
||||
|
||||
def update_account_tags(self, param):
|
||||
try:
|
||||
return self.automation_client.automation_account.update(self.resource_group, self.name, param)
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when updating automation account {0}: {1}'.format(self.name, exc.message))
|
||||
|
||||
def delete_account(self):
|
||||
try:
|
||||
return self.automation_client.automation_account.delete(self.resource_group, self.name)
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when deleting automation account {0}: {1}'.format(self.name, exc.message))
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMAutomationAccount()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,383 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_automationaccount_info
|
||||
version_added: '2.9'
|
||||
short_description: Get Azure automation account facts
|
||||
description:
|
||||
- Get facts of automation account.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
type: str
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the automation account.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
list_statistics:
|
||||
description:
|
||||
- List statistics details for an automation account.
- Note this will add network overhead; it is suggested to use it only when I(name) is set.
|
||||
type: bool
|
||||
list_usages:
|
||||
description:
|
||||
- List usage details for an automation account.
- Note this will add network overhead; it is suggested to use it only when I(name) is set.
|
||||
type: bool
|
||||
list_keys:
|
||||
description:
|
||||
- List keys for an automation account.
- Note this will add network overhead; it is suggested to use it only when I(name) is set.
|
||||
type: bool
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get details of an automation account
|
||||
azure_rm_automationaccount_info:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
list_statistics: yes
|
||||
list_usages: yes
|
||||
list_keys: yes
|
||||
|
||||
- name: List automation account in a resource group
|
||||
azure_rm_automationaccount_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: List automation account in a resource group
|
||||
azure_rm_automationaccount_info:
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
automation_accounts:
|
||||
description:
|
||||
- List of automation account dicts.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups
|
||||
/myResourceGroup/providers/Microsoft.Automation/automationAccounts/Testing"
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group name.
|
||||
type: str
|
||||
returned: always
|
||||
sample: myResourceGroup
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
type: str
|
||||
returned: always
|
||||
sample: Testing
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
type: str
|
||||
returned: always
|
||||
sample: eastus
|
||||
creation_time:
|
||||
description:
|
||||
- Resource creation date time.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "2019-04-26T02:55:16.500Z"
|
||||
last_modified_time:
|
||||
description:
|
||||
- Resource last modified date time.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "2019-04-26T02:55:16.500Z"
|
||||
state:
|
||||
description:
|
||||
- Resource state.
|
||||
type: str
|
||||
returned: always
|
||||
sample: ok
|
||||
keys:
|
||||
description:
|
||||
- Resource keys.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
key_name:
|
||||
description:
|
||||
- Name of the key.
|
||||
type: str
|
||||
returned: always
|
||||
sample: Primary
|
||||
permissions:
|
||||
description:
|
||||
- Permission of the key.
|
||||
type: str
|
||||
returned: always
|
||||
sample: Full
|
||||
value:
|
||||
description:
|
||||
- Value of the key.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "MbepKTO6IyGwml0GaKBkKN"
|
||||
statistics:
|
||||
description:
|
||||
- Resource statistics.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
counter_property:
|
||||
description:
|
||||
- Property value of the statistic.
|
||||
type: str
|
||||
returned: always
|
||||
sample: New
|
||||
counter_value:
|
||||
description:
|
||||
- Value of the statistic.
|
||||
type: int
|
||||
returned: always
|
||||
sample: 0
|
||||
end_time:
|
||||
description:
|
||||
- EndTime of the statistic.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "2019-04-26T06:29:43.587518Z"
|
||||
id:
|
||||
description:
|
||||
- ID of the statistic.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups
|
||||
/myResourceGroup/providers/Microsoft.Automation/automationAccounts/Testing/statistics/New"
|
||||
start_time:
|
||||
description:
|
||||
- StartTime of the statistic.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "2019-04-26T06:29:43.587518Z"
|
||||
usages:
|
||||
description:
|
||||
- Resource usages.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
current_value:
|
||||
description:
|
||||
- Current usage.
|
||||
type: float
|
||||
returned: always
|
||||
sample: 0.0
|
||||
limit:
|
||||
description:
|
||||
- Max limit, C(-1) for unlimited.
|
||||
type: int
|
||||
returned: always
|
||||
sample: -1
|
||||
name:
|
||||
description:
|
||||
- Usage counter name.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
localized_value:
|
||||
description:
|
||||
- Localized name.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "SubscriptionUsage"
|
||||
value:
|
||||
description:
|
||||
- Name value.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "SubscriptionUsage"
|
||||
unit:
|
||||
description:
|
||||
- Usage unit name.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "Minute"
|
||||
throttle_status:
|
||||
description:
|
||||
- Usage throttle status.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "NotThrottled"
|
||||
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMAutomationAccountInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
),
|
||||
list_statistics=dict(
|
||||
type='bool'
|
||||
),
|
||||
list_usages=dict(
|
||||
type='bool'
|
||||
),
|
||||
list_keys=dict(
|
||||
type='bool'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict()
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
self.list_statistics = None
|
||||
self.list_usages = None
|
||||
self.list_keys = None
|
||||
|
||||
super(AzureRMAutomationAccountInfo, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_automationaccount_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_automationaccount_facts' module has been renamed to 'azure_rm_automationaccount_info'", version='2.13')
|
||||
|
||||
for key in list(self.module_arg_spec):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.resource_group and self.name:
|
||||
accounts = [self.get()]
|
||||
elif self.resource_group:
|
||||
accounts = self.list_by_resource_group()
|
||||
else:
|
||||
accounts = self.list_all()
|
||||
self.results['automation_accounts'] = [self.to_dict(x) for x in accounts if self.has_tags(x.tags, self.tags)]
|
||||
return self.results
|
||||
|
||||
def to_dict(self, account):
|
||||
if not account:
|
||||
return None
|
||||
id_dict = parse_resource_id(account.id)
|
||||
result = account.as_dict()
|
||||
result['resource_group'] = id_dict['resource_group']
|
||||
if self.list_statistics:
|
||||
result['statistics'] = self.get_statics(id_dict['resource_group'], account.name)
|
||||
if self.list_usages:
|
||||
result['usages'] = self.get_usages(id_dict['resource_group'], account.name)
|
||||
if self.list_keys:
|
||||
result['keys'] = self.list_account_keys(id_dict['resource_group'], account.name)
|
||||
return result
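# Sketch: parse_resource_id() from msrestazure.tools splits a full resource ID
# into its parts (for example 'subscription', 'resource_group', 'namespace',
# 'type' and 'name'); only the 'resource_group' component is consumed here so
# the flattened account dict can expose it directly.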
|
||||
|
||||
def get(self):
|
||||
try:
|
||||
return self.automation_client.automation_account.get(self.resource_group, self.name)
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when getting automation account {0}: {1}'.format(self.name, exc.message))
|
||||
|
||||
def list_by_resource_group(self):
|
||||
result = []
|
||||
try:
|
||||
resp = self.automation_client.automation_account.list_by_resource_group(self.resource_group)
|
||||
while True:
|
||||
result.append(resp.next())
|
||||
except StopIteration:
|
||||
pass
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when listing automation account in resource group {0}: {1}'.format(self.resource_group, exc.message))
|
||||
return result
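# Note on the paging pattern above (assuming the automation SDK returns a paged
# iterator): results are drained by calling resp.next() until StopIteration is
# raised; iterating with a plain `for item in resp:` loop would be equivalent,
# e.g. result = [item for item in resp].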
|
||||
|
||||
def list_all(self):
|
||||
result = []
|
||||
try:
|
||||
resp = self.automation_client.automation_account.list()
|
||||
while True:
|
||||
result.append(resp.next())
|
||||
except StopIteration:
|
||||
pass
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when listing automation account: {0}'.format(exc.message))
|
||||
return result
|
||||
|
||||
def get_statics(self, resource_group, name):
|
||||
result = []
|
||||
try:
|
||||
resp = self.automation_client.statistics.list_by_automation_account(resource_group, name)
|
||||
while True:
|
||||
result.append(resp.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when getting statistics for automation account {0}/{1}: {2}'.format(resource_group, name, exc.message))
|
||||
return result
|
||||
|
||||
def get_usages(self, resource_group, name):
|
||||
result = []
|
||||
try:
|
||||
resp = self.automation_client.usages.list_by_automation_account(resource_group, name)
|
||||
while True:
|
||||
result.append(resp.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when getting usage for automation account {0}/{1}: {2}'.format(resource_group, name, exc.message))
|
||||
return result
|
||||
|
||||
def list_account_keys(self, resource_group, name):
|
||||
try:
|
||||
resp = self.automation_client.keys.list_by_automation_account(resource_group, name)
|
||||
return [x.as_dict() for x in resp.keys]
|
||||
except self.automation_models.ErrorResponseException as exc:
|
||||
self.fail('Error when listing keys for automation account {0}/{1}: {2}'.format(resource_group, name, exc.message))
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMAutomationAccountInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,649 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_autoscale
|
||||
version_added: "2.7"
|
||||
short_description: Manage Azure autoscale setting
|
||||
description:
|
||||
- Create, delete an autoscale setting.
|
||||
options:
|
||||
target:
|
||||
description:
|
||||
- The identifier of the resource to apply autoscale setting.
|
||||
- It could be the resource id string.
|
||||
- It can also be a dict that contains the C(name), C(subscription_id), C(namespace), C(types) and C(resource_group) of the resource.
|
||||
resource_group:
|
||||
required: true
|
||||
description:
|
||||
- Resource group of the resource.
|
||||
enabled:
|
||||
type: bool
|
||||
description:
|
||||
- Specifies whether automatic scaling is enabled for the resource.
|
||||
default: true
|
||||
profiles:
|
||||
description:
|
||||
- The collection of automatic scaling profiles that specify different scaling parameters for different time periods.
|
||||
- A maximum of 20 profiles can be specified.
|
||||
suboptions:
|
||||
name:
|
||||
required: true
|
||||
description:
|
||||
- The name of the profile.
|
||||
count:
|
||||
required: true
|
||||
description:
|
||||
- The number of instances that will be set if metrics are not available for evaluation.
|
||||
- The default is only used if the current instance count is lower than the default.
|
||||
min_count:
|
||||
description:
|
||||
- The minimum number of instances for the resource.
|
||||
max_count:
|
||||
description:
|
||||
- The maximum number of instances for the resource.
|
||||
- The actual maximum number of instances is limited by the cores that are available in the subscription.
|
||||
recurrence_frequency:
|
||||
default: None
|
||||
description:
|
||||
- How often the schedule profile should take effect.
|
||||
- If this value is C(Week), each week will have the same set of profiles.
|
||||
- This element is not used if the FixedDate element is used.
|
||||
choices:
|
||||
- None
|
||||
- Second
|
||||
- Minute
|
||||
- Hour
|
||||
- Day
|
||||
- Week
|
||||
- Month
|
||||
- Year
|
||||
recurrence_timezone:
|
||||
description:
|
||||
- The timezone of repeating times at which this profile begins.
|
||||
- This element is not used if the FixedDate element is used.
|
||||
recurrence_days:
|
||||
description:
|
||||
- The days of repeating times at which this profile begins.
|
||||
- This element is not used if the FixedDate element is used.
|
||||
recurrence_hours:
|
||||
description:
|
||||
- The hours of repeating times at which this profile begins.
|
||||
- This element is not used if the FixedDate element is used.
|
||||
recurrence_mins:
|
||||
description:
|
||||
- The mins of repeating times at which this profile begins.
|
||||
- This element is not used if the FixedDate element is used.
|
||||
fixed_date_timezone:
|
||||
description:
|
||||
- The specific date-time timezone for the profile.
|
||||
- This element is not used if the Recurrence element is used.
|
||||
fixed_date_start:
|
||||
description:
|
||||
- The specific date-time start for the profile.
|
||||
- This element is not used if the Recurrence element is used.
|
||||
fixed_date_end:
|
||||
description:
|
||||
- The specific date-time end for the profile.
|
||||
- This element is not used if the Recurrence element is used.
|
||||
rules:
|
||||
description:
|
||||
- The collection of rules that provide the triggers and parameters for the scaling action.
|
||||
- A maximum of 10 rules can be specified.
|
||||
suboptions:
|
||||
time_aggregation:
|
||||
default: Average
|
||||
description:
|
||||
- How the data that is collected should be combined over time.
|
||||
choices:
|
||||
- Average
|
||||
- Minimum
|
||||
- Maximum
|
||||
- Total
|
||||
- Count
|
||||
time_window:
|
||||
required: true
|
||||
description:
|
||||
- The range of time (minutes) in which instance data is collected.
|
||||
- This value must be greater than the delay in metric collection, which can vary from resource-to-resource.
|
||||
- Must be between 5 and 720.
|
||||
direction:
|
||||
description:
|
||||
- Whether the scaling action increases or decreases the number of instances.
|
||||
choices:
|
||||
- Increase
|
||||
- Decrease
|
||||
metric_name:
|
||||
required: true
|
||||
description:
|
||||
- The name of the metric that defines what the rule monitors.
|
||||
metric_resource_uri:
|
||||
description:
|
||||
- The resource identifier of the resource the rule monitors.
|
||||
value:
|
||||
description:
|
||||
- The number of instances that are involved in the scaling action.
|
||||
- This value must be 1 or greater.
|
||||
operator:
|
||||
default: GreaterThan
|
||||
description:
|
||||
- The operator that is used to compare the metric data and the threshold.
|
||||
choices:
|
||||
- Equals
|
||||
- NotEquals
|
||||
- GreaterThan
|
||||
- GreaterThanOrEqual
|
||||
- LessThan
|
||||
- LessThanOrEqual
|
||||
cooldown:
|
||||
description:
|
||||
- The amount of time (minutes) to wait since the last scaling action before this action occurs.
|
||||
- It must be between 1 and 10080.
|
||||
time_grain:
|
||||
required: true
|
||||
description:
|
||||
- The granularity (minutes) of metrics the rule monitors.
|
||||
- Must be one of the predefined values returned from metric definitions for the metric.
|
||||
- Must be between 1 and 720.
|
||||
statistic:
|
||||
default: Average
|
||||
description:
|
||||
- How the metrics from multiple instances are combined.
|
||||
choices:
|
||||
- Average
|
||||
- Min
|
||||
- Max
|
||||
- Sum
|
||||
threshold:
|
||||
default: 70
|
||||
description:
|
||||
- The threshold of the metric that triggers the scale action.
|
||||
type:
|
||||
description:
|
||||
- The type of action that should occur when the scale rule fires.
|
||||
choices:
|
||||
- PercentChangeCount
|
||||
- ExactCount
|
||||
- ChangeCount
|
||||
notifications:
|
||||
description:
|
||||
- The collection of notifications.
|
||||
suboptions:
|
||||
custom_emails:
|
||||
description:
|
||||
- The custom e-mails list. This value can be null or empty, in which case this attribute will be ignored.
|
||||
send_to_subscription_administrator:
|
||||
type: bool
|
||||
description:
|
||||
- A value indicating whether to send email to subscription administrator.
|
||||
webhooks:
|
||||
description:
|
||||
- The list of webhook notification service URIs.
|
||||
send_to_subscription_co_administrators:
|
||||
type: bool
|
||||
description:
|
||||
- A value indicating whether to send email to subscription co-administrators.
|
||||
state:
|
||||
default: present
|
||||
description:
|
||||
- Assert the state of the autoscale setting. Use C(present) to create or update and C(absent) to delete.
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
location:
|
||||
description:
|
||||
- Location of the resource.
|
||||
name:
|
||||
required: true
|
||||
description:
|
||||
- Name of the resource.
|
||||
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create an auto scale
|
||||
azure_rm_autoscale:
|
||||
target: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/myVmss"
|
||||
enabled: true
|
||||
profiles:
|
||||
- count: '1'
|
||||
recurrence_days:
|
||||
- Monday
|
||||
name: Auto created scale condition
|
||||
recurrence_timezone: China Standard Time
|
||||
recurrence_mins:
|
||||
- '0'
|
||||
min_count: '1'
|
||||
max_count: '1'
|
||||
recurrence_frequency: Week
|
||||
recurrence_hours:
|
||||
- '18'
|
||||
name: scale
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Create an auto scale with complicated profile
|
||||
azure_rm_autoscale:
|
||||
target: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets
|
||||
/myVmss"
|
||||
enabled: true
|
||||
profiles:
|
||||
- count: '1'
|
||||
recurrence_days:
|
||||
- Monday
|
||||
name: Auto created scale condition 0
|
||||
rules:
|
||||
- time_aggregation: Average
|
||||
time_window: 10
|
||||
direction: Increase
|
||||
metric_name: Percentage CPU
|
||||
metric_resource_uri: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtua
|
||||
lMachineScaleSets/vmss"
|
||||
value: '1'
|
||||
threshold: 70
|
||||
cooldown: 5
|
||||
time_grain: 1
|
||||
statistic: Average
|
||||
operator: GreaterThan
|
||||
type: ChangeCount
|
||||
max_count: '1'
|
||||
recurrence_mins:
|
||||
- '0'
|
||||
min_count: '1'
|
||||
recurrence_timezone: China Standard Time
|
||||
recurrence_frequency: Week
|
||||
recurrence_hours:
|
||||
- '6'
|
||||
notifications:
|
||||
- email_admin: True
|
||||
email_co_admin: False
|
||||
custom_emails:
|
||||
- yuwzho@microsoft.com
|
||||
name: scale
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Delete an Azure Auto Scale Setting
|
||||
azure_rm_autoscale:
|
||||
state: absent
|
||||
resource_group: myResourceGroup
|
||||
name: scale
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
state:
|
||||
description: Current state of the resource.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"changed": false,
|
||||
"enabled": true,
|
||||
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/microsoft.insights/autoscalesettings/scale",
|
||||
"location": "eastus",
|
||||
"name": "scale",
|
||||
"notifications": [
|
||||
{
|
||||
"custom_emails": [
|
||||
"yuwzho@microsoft.com"
|
||||
],
|
||||
"send_to_subscription_administrator": true,
|
||||
"send_to_subscription_co_administrators": false,
|
||||
"webhooks": []
|
||||
}
|
||||
],
|
||||
"profiles": [
|
||||
{
|
||||
"count": "1",
|
||||
"max_count": "1",
|
||||
"min_count": "1",
|
||||
"name": "Auto created scale condition 0",
|
||||
"recurrence_days": [
|
||||
"Monday"
|
||||
],
|
||||
"recurrence_frequency": "Week",
|
||||
"recurrence_hours": [
|
||||
"6"
|
||||
],
|
||||
"recurrence_mins": [
|
||||
"0"
|
||||
],
|
||||
"recurrence_timezone": "China Standard Time",
|
||||
"rules": [
|
||||
{
|
||||
"cooldown": 5.0,
|
||||
"direction": "Increase",
|
||||
"metric_name": "Percentage CPU",
|
||||
"metric_resource_uri": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsof
|
||||
t.Compute/virtualMachineScaleSets/MyVmss",
|
||||
"operator": "GreaterThan",
|
||||
"statistic": "Average",
|
||||
"threshold": 70.0,
|
||||
"time_aggregation": "Average",
|
||||
"time_grain": 1.0,
|
||||
"time_window": 10.0,
|
||||
"type": "ChangeCount",
|
||||
"value": "1"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"target": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScale
|
||||
Sets/myVmss"
|
||||
}
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils._text import to_native
|
||||
from datetime import timedelta
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.monitor.models import WebhookNotification, EmailNotification, AutoscaleNotification, RecurrentSchedule, MetricTrigger, \
|
||||
ScaleAction, AutoscaleSettingResource, AutoscaleProfile, ScaleCapacity, TimeWindow, Recurrence, ScaleRule
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
# duplicated in azure_rm_autoscale_info
|
||||
def timedelta_to_minutes(time):
|
||||
if not time:
|
||||
return 0
|
||||
return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0
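# Illustrative usage (not part of the original module): the SDK reports
# time_grain, time_window and cooldown as datetime.timedelta values, which this
# helper converts back to float minutes, e.g.
#   timedelta_to_minutes(timedelta(minutes=5))            -> 5.0
#   timedelta_to_minutes(timedelta(hours=1, minutes=30))  -> 90.0
#   timedelta_to_minutes(None)                            -> 0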
|
||||
|
||||
|
||||
def get_enum_value(item):
|
||||
if 'value' in dir(item):
|
||||
return to_native(item.value)
|
||||
return to_native(item)
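# Sketch: get_enum_value() accepts either an SDK enum member or a plain string
# and returns the native string form, e.g. (assuming the monitor SDK enums)
#   get_enum_value(ComparisonOperationType.greater_than)  -> 'GreaterThan'
#   get_enum_value('GreaterThan')                         -> 'GreaterThan'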
|
||||
|
||||
|
||||
def auto_scale_to_dict(instance):
|
||||
if not instance:
|
||||
return dict()
|
||||
return dict(
|
||||
id=to_native(instance.id or ''),
|
||||
name=to_native(instance.name),
|
||||
location=to_native(instance.location),
|
||||
profiles=[profile_to_dict(p) for p in instance.profiles or []],
|
||||
notifications=[notification_to_dict(n) for n in instance.notifications or []],
|
||||
enabled=instance.enabled,
|
||||
target=to_native(instance.target_resource_uri),
|
||||
tags=instance.tags
|
||||
)
|
||||
|
||||
|
||||
def rule_to_dict(rule):
|
||||
if not rule:
|
||||
return dict()
|
||||
result = dict(metric_name=to_native(rule.metric_trigger.metric_name),
|
||||
metric_resource_uri=to_native(rule.metric_trigger.metric_resource_uri),
|
||||
time_grain=timedelta_to_minutes(rule.metric_trigger.time_grain),
|
||||
statistic=get_enum_value(rule.metric_trigger.statistic),
|
||||
time_window=timedelta_to_minutes(rule.metric_trigger.time_window),
|
||||
time_aggregation=get_enum_value(rule.metric_trigger.time_aggregation),
|
||||
operator=get_enum_value(rule.metric_trigger.operator),
|
||||
threshold=float(rule.metric_trigger.threshold))
|
||||
if rule.scale_action and to_native(rule.scale_action.direction) != 'None':
|
||||
result['direction'] = get_enum_value(rule.scale_action.direction)
|
||||
result['type'] = get_enum_value(rule.scale_action.type)
|
||||
result['value'] = to_native(rule.scale_action.value)
|
||||
result['cooldown'] = timedelta_to_minutes(rule.scale_action.cooldown)
|
||||
return result
|
||||
|
||||
|
||||
def profile_to_dict(profile):
|
||||
if not profile:
|
||||
return dict()
|
||||
result = dict(name=to_native(profile.name),
|
||||
count=to_native(profile.capacity.default),
|
||||
max_count=to_native(profile.capacity.maximum),
|
||||
min_count=to_native(profile.capacity.minimum))
|
||||
|
||||
if profile.rules:
|
||||
result['rules'] = [rule_to_dict(r) for r in profile.rules]
|
||||
if profile.fixed_date:
|
||||
result['fixed_date_timezone'] = profile.fixed_date.time_zone
|
||||
result['fixed_date_start'] = profile.fixed_date.start
|
||||
result['fixed_date_end'] = profile.fixed_date.end
|
||||
if profile.recurrence:
|
||||
if get_enum_value(profile.recurrence.frequency) != 'None':
|
||||
result['recurrence_frequency'] = get_enum_value(profile.recurrence.frequency)
|
||||
if profile.recurrence.schedule:
|
||||
result['recurrence_timezone'] = to_native(str(profile.recurrence.schedule.time_zone))
|
||||
result['recurrence_days'] = [to_native(r) for r in profile.recurrence.schedule.days]
|
||||
result['recurrence_hours'] = [to_native(r) for r in profile.recurrence.schedule.hours]
|
||||
result['recurrence_mins'] = [to_native(r) for r in profile.recurrence.schedule.minutes]
|
||||
return result
|
||||
|
||||
|
||||
def notification_to_dict(notification):
|
||||
if not notification:
|
||||
return dict()
|
||||
return dict(send_to_subscription_administrator=notification.email.send_to_subscription_administrator if notification.email else False,
|
||||
send_to_subscription_co_administrators=notification.email.send_to_subscription_co_administrators if notification.email else False,
|
||||
custom_emails=[to_native(e) for e in notification.email.custom_emails or []],
|
||||
webhooks=[to_native(w.service_url) for w in notification.webhooks or []])
|
||||
|
||||
|
||||
rule_spec = dict(
|
||||
metric_name=dict(type='str', required=True),
|
||||
metric_resource_uri=dict(type='str'),
|
||||
time_grain=dict(type='float', required=True),
|
||||
statistic=dict(type='str', choices=['Average', 'Min', 'Max', 'Sum'], default='Average'),
|
||||
time_window=dict(type='float', required=True),
|
||||
time_aggregation=dict(type='str', choices=['Average', 'Minimum', 'Maximum', 'Total', 'Count'], default='Average'),
|
||||
operator=dict(type='str',
|
||||
choices=['Equals', 'NotEquals', 'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual'],
|
||||
default='GreaterThan'),
|
||||
threshold=dict(type='float', default=70),
|
||||
direction=dict(type='str', choices=['Increase', 'Decrease']),
|
||||
type=dict(type='str', choices=['PercentChangeCount', 'ExactCount', 'ChangeCount']),
|
||||
value=dict(type='str'),
|
||||
cooldown=dict(type='float')
|
||||
)
|
||||
|
||||
|
||||
profile_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
count=dict(type='str', required=True),
|
||||
max_count=dict(type='str'),
|
||||
min_count=dict(type='str'),
|
||||
rules=dict(type='list', elements='dict', options=rule_spec),
|
||||
fixed_date_timezone=dict(type='str'),
|
||||
fixed_date_start=dict(type='str'),
|
||||
fixed_date_end=dict(type='str'),
|
||||
recurrence_frequency=dict(type='str', choices=['None', 'Second', 'Minute', 'Hour', 'Day', 'Week', 'Month', 'Year'], default='None'),
|
||||
recurrence_timezone=dict(type='str'),
|
||||
recurrence_days=dict(type='list', elements='str'),
|
||||
recurrence_hours=dict(type='list', elements='str'),
|
||||
recurrence_mins=dict(type='list', elements='str')
|
||||
)
|
||||
|
||||
|
||||
notification_spec = dict(
|
||||
send_to_subscription_administrator=dict(type='bool', aliases=['email_admin'], default=False),
|
||||
send_to_subscription_co_administrators=dict(type='bool', aliases=['email_co_admin'], default=False),
|
||||
custom_emails=dict(type='list', elements='str'),
|
||||
webhooks=dict(type='list', elements='str')
|
||||
)
|
||||
|
||||
|
||||
class AzureRMAutoScale(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
location=dict(type='str'),
|
||||
target=dict(type='raw'),
|
||||
profiles=dict(type='list', elements='dict', options=profile_spec),
|
||||
enabled=dict(type='bool', default=True),
|
||||
notifications=dict(type='list', elements='dict', options=notification_spec)
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['target', 'profiles'])
|
||||
]
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.location = None
|
||||
self.tags = None
|
||||
self.target = None
|
||||
self.profiles = None
|
||||
self.notifications = None
|
||||
self.enabled = None
|
||||
|
||||
super(AzureRMAutoScale, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
results = None
|
||||
changed = False
|
||||
|
||||
self.log('Fetching auto scale settings {0}'.format(self.name))
|
||||
results = self.get_auto_scale()
|
||||
if results and self.state == 'absent':
|
||||
# delete
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
self.delete_auto_scale()
|
||||
elif self.state == 'present':
|
||||
|
||||
if not self.location:
|
||||
# Set default location
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
self.location = resource_group.location
|
||||
|
||||
resource_id = self.target
|
||||
if isinstance(self.target, dict):
|
||||
resource_id = format_resource_id(val=self.target['name'],
|
||||
subscription_id=self.target.get('subscription_id') or self.subscription_id,
|
||||
namespace=self.target['namespace'],
|
||||
types=self.target['types'],
|
||||
resource_group=self.target.get('resource_group') or self.resource_group)
|
||||
self.target = resource_id
|
||||
resource_name = self.name
|
||||
|
||||
def create_rule_instance(params):
|
||||
rule = params.copy()
|
||||
rule['metric_resource_uri'] = rule.get('metric_resource_uri', self.target)
|
||||
rule['time_grain'] = timedelta(minutes=rule.get('time_grain', 0))
|
||||
rule['time_window'] = timedelta(minutes=rule.get('time_window', 0))
|
||||
rule['cooldown'] = timedelta(minutes=rule.get('cooldown', 0))
|
||||
return ScaleRule(metric_trigger=MetricTrigger(**rule), scale_action=ScaleAction(**rule))
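# Sketch of the mapping performed by create_rule_instance (hypothetical rule dict):
#   {'metric_name': 'Percentage CPU', 'time_grain': 1, 'time_window': 10,
#    'direction': 'Increase', 'type': 'ChangeCount', 'value': '1', 'cooldown': 5, ...}
# The minute values are converted to timedelta objects, then the same dict is
# expanded into both MetricTrigger(**rule) and ScaleAction(**rule); each model
# uses the fields it declares and the msrest base model effectively ignores the rest.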
|
||||
|
||||
profiles = [AutoscaleProfile(name=p.get('name'),
|
||||
capacity=ScaleCapacity(minimum=p.get('min_count'),
|
||||
maximum=p.get('max_count'),
|
||||
default=p.get('count')),
|
||||
rules=[create_rule_instance(r) for r in p.get('rules') or []],
|
||||
fixed_date=TimeWindow(time_zone=p.get('fixed_date_timezone'),
|
||||
start=p.get('fixed_date_start'),
|
||||
end=p.get('fixed_date_end')) if p.get('fixed_date_timezone') else None,
|
||||
recurrence=Recurrence(frequency=p.get('recurrence_frequency'),
|
||||
schedule=(RecurrentSchedule(time_zone=p.get('recurrence_timezone'),
|
||||
days=p.get('recurrence_days'),
|
||||
hours=p.get('recurrence_hours'),
|
||||
minutes=p.get('recurrence_mins'))))
|
||||
if p.get('recurrence_frequency') and p['recurrence_frequency'] != 'None' else None)
|
||||
for p in self.profiles or []]
|
||||
|
||||
notifications = [AutoscaleNotification(email=EmailNotification(**n),
|
||||
webhooks=[WebhookNotification(service_uri=w) for w in n.get('webhooks') or []])
|
||||
for n in self.notifications or []]
|
||||
|
||||
if not results:
|
||||
# create new
|
||||
changed = True
|
||||
else:
|
||||
# check changed
|
||||
resource_name = results.autoscale_setting_resource_name or self.name
|
||||
update_tags, tags = self.update_tags(results.tags)
|
||||
if update_tags:
|
||||
changed = True
|
||||
self.tags = tags
|
||||
if self.target != results.target_resource_uri:
|
||||
changed = True
|
||||
if self.enabled != results.enabled:
|
||||
changed = True
|
||||
profile_result_set = set([str(profile_to_dict(p)) for p in results.profiles or []])
|
||||
if profile_result_set != set([str(profile_to_dict(p)) for p in profiles]):
|
||||
changed = True
|
||||
notification_result_set = set([str(notification_to_dict(n)) for n in results.notifications or []])
|
||||
if notification_result_set != set([str(notification_to_dict(n)) for n in notifications]):
|
||||
changed = True
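# Idempotency check (summary of the comparisons above): existing profiles and
# notifications are serialised with profile_to_dict()/notification_to_dict()
# and compared as sets of strings, so a difference in ordering alone does not
# mark the resource as changed.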
|
||||
if changed:
|
||||
# construct the instance that will be sent to the create_or_update API
|
||||
results = AutoscaleSettingResource(location=self.location,
|
||||
tags=self.tags,
|
||||
profiles=profiles,
|
||||
notifications=notifications,
|
||||
enabled=self.enabled,
|
||||
autoscale_setting_resource_name=resource_name,
|
||||
target_resource_uri=self.target)
|
||||
if not self.check_mode:
|
||||
results = self.create_or_update_auto_scale(results)
|
||||
# results should be the dict of the instance
|
||||
self.results = auto_scale_to_dict(results)
|
||||
self.results['changed'] = changed
|
||||
return self.results
|
||||
|
||||
def get_auto_scale(self):
|
||||
try:
|
||||
return self.monitor_client.autoscale_settings.get(self.resource_group, self.name)
|
||||
except Exception as exc:
|
||||
self.log('Error: failed to get auto scale settings {0} - {1}'.format(self.name, str(exc)))
|
||||
return None
|
||||
|
||||
def create_or_update_auto_scale(self, param):
|
||||
try:
|
||||
return self.monitor_client.autoscale_settings.create_or_update(self.resource_group, self.name, param)
|
||||
except Exception as exc:
|
||||
self.fail("Error creating auto scale settings {0} - {1}".format(self.name, str(exc)))
|
||||
|
||||
def delete_auto_scale(self):
|
||||
self.log('Deleting auto scale settings {0}'.format(self.name))
|
||||
try:
|
||||
return self.monitor_client.autoscale_settings.delete(self.resource_group, self.name)
|
||||
except Exception as exc:
|
||||
self.fail("Error deleting auto scale settings {0} - {1}".format(self.name, str(exc)))
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMAutoScale()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,271 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_autoscale_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Auto Scale Setting facts
|
||||
description:
|
||||
- Get facts of Auto Scale Setting.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the Auto Scale Setting.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of Auto Scale Setting
|
||||
azure_rm_autoscale_info:
|
||||
resource_group: myResourceGroup
|
||||
name: auto_scale_name
|
||||
|
||||
- name: List instances of Auto Scale Setting
|
||||
azure_rm_autoscale_info:
|
||||
resource_group: myResourceGroup
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
autoscales:
|
||||
description: List of Azure Scale Settings dicts.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [{
|
||||
"enabled": true,
|
||||
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/microsoft.insights/autoscalesettings/scale",
|
||||
"location": "eastus",
|
||||
"name": "scale",
|
||||
"notifications": [
|
||||
{
|
||||
"custom_emails": [
|
||||
"yuwzho@microsoft.com"
|
||||
],
|
||||
"send_to_subscription_administrator": true,
|
||||
"send_to_subscription_co_administrators": false,
|
||||
"webhooks": []
|
||||
}
|
||||
],
|
||||
"profiles": [
|
||||
{
|
||||
"count": "1",
|
||||
"max_count": "1",
|
||||
"min_count": "1",
|
||||
"name": "Auto created scale condition 0",
|
||||
"recurrence_days": [
|
||||
"Monday"
|
||||
],
|
||||
"recurrence_frequency": "Week",
|
||||
"recurrence_hours": [
|
||||
"6"
|
||||
],
|
||||
"recurrence_mins": [
|
||||
"0"
|
||||
],
|
||||
"recurrence_timezone": "China Standard Time",
|
||||
"rules": [
|
||||
{
|
||||
"cooldown": 5.0,
|
||||
"direction": "Increase",
|
||||
"metric_name": "Percentage CPU",
|
||||
"metric_resource_uri": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsof
|
||||
t.Compute/virtualMachineScaleSets/myVmss",
|
||||
"operator": "GreaterThan",
|
||||
"statistic": "Average",
|
||||
"threshold": 70.0,
|
||||
"time_aggregation": "Average",
|
||||
"time_grain": 1.0,
|
||||
"time_window": 10.0,
|
||||
"type": "ChangeCount",
|
||||
"value": "1"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"target": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScale
|
||||
Sets/myVmss"
|
||||
}]
|
||||
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
# duplicated in azure_rm_autoscale
|
||||
def timedelta_to_minutes(time):
|
||||
if not time:
|
||||
return 0
|
||||
return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0
|
||||
|
||||
|
||||
def get_enum_value(item):
|
||||
if 'value' in dir(item):
|
||||
return to_native(item.value)
|
||||
return to_native(item)
|
||||
|
||||
|
||||
def auto_scale_to_dict(instance):
|
||||
if not instance:
|
||||
return dict()
|
||||
return dict(
|
||||
id=to_native(instance.id or ''),
|
||||
name=to_native(instance.name),
|
||||
location=to_native(instance.location),
|
||||
profiles=[profile_to_dict(p) for p in instance.profiles or []],
|
||||
notifications=[notification_to_dict(n) for n in instance.notifications or []],
|
||||
enabled=instance.enabled,
|
||||
target=to_native(instance.target_resource_uri),
|
||||
tags=instance.tags
|
||||
)
|
||||
|
||||
|
||||
def rule_to_dict(rule):
|
||||
if not rule:
|
||||
return dict()
|
||||
result = dict(metric_name=to_native(rule.metric_trigger.metric_name),
|
||||
metric_resource_uri=to_native(rule.metric_trigger.metric_resource_uri),
|
||||
time_grain=timedelta_to_minutes(rule.metric_trigger.time_grain),
|
||||
statistic=get_enum_value(rule.metric_trigger.statistic),
|
||||
time_window=timedelta_to_minutes(rule.metric_trigger.time_window),
|
||||
time_aggregation=get_enum_value(rule.metric_trigger.time_aggregation),
|
||||
operator=get_enum_value(rule.metric_trigger.operator),
|
||||
threshold=float(rule.metric_trigger.threshold))
|
||||
if rule.scale_action and to_native(rule.scale_action.direction) != 'None':
|
||||
result['direction'] = get_enum_value(rule.scale_action.direction)
|
||||
result['type'] = get_enum_value(rule.scale_action.type)
|
||||
result['value'] = to_native(rule.scale_action.value)
|
||||
result['cooldown'] = timedelta_to_minutes(rule.scale_action.cooldown)
|
||||
return result
|
||||
|
||||
|
||||
def profile_to_dict(profile):
|
||||
if not profile:
|
||||
return dict()
|
||||
result = dict(name=to_native(profile.name),
|
||||
count=to_native(profile.capacity.default),
|
||||
max_count=to_native(profile.capacity.maximum),
|
||||
min_count=to_native(profile.capacity.minimum))
|
||||
|
||||
if profile.rules:
|
||||
result['rules'] = [rule_to_dict(r) for r in profile.rules]
|
||||
if profile.fixed_date:
|
||||
result['fixed_date_timezone'] = profile.fixed_date.time_zone
|
||||
result['fixed_date_start'] = profile.fixed_date.start
|
||||
result['fixed_date_end'] = profile.fixed_date.end
|
||||
if profile.recurrence:
|
||||
if get_enum_value(profile.recurrence.frequency) != 'None':
|
||||
result['recurrence_frequency'] = get_enum_value(profile.recurrence.frequency)
|
||||
if profile.recurrence.schedule:
|
||||
result['recurrence_timezone'] = to_native(str(profile.recurrence.schedule.time_zone))
|
||||
result['recurrence_days'] = [to_native(r) for r in profile.recurrence.schedule.days]
|
||||
result['recurrence_hours'] = [to_native(r) for r in profile.recurrence.schedule.hours]
|
||||
result['recurrence_mins'] = [to_native(r) for r in profile.recurrence.schedule.minutes]
|
||||
return result
|
||||
|
||||
|
||||
def notification_to_dict(notification):
|
||||
if not notification:
|
||||
return dict()
|
||||
return dict(send_to_subscription_administrator=notification.email.send_to_subscription_administrator if notification.email else False,
|
||||
send_to_subscription_co_administrators=notification.email.send_to_subscription_co_administrators if notification.email else False,
|
||||
custom_emails=[to_native(e) for e in notification.email.custom_emails or []],
|
||||
webhooks=[to_native(w.service_url) for w in notification.webhooks or []])
|
||||
|
||||
|
||||
class AzureRMAutoScaleInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict()
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMAutoScaleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_autoscale_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_autoscale_facts' module has been renamed to 'azure_rm_autoscale_info'", version='2.13')
|
||||
|
||||
for key in list(self.module_arg_spec):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.resource_group and self.name:
|
||||
self.results['autoscales'] = self.get()
|
||||
elif self.resource_group:
|
||||
self.results['autoscales'] = self.list_by_resource_group()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
result = []
|
||||
try:
|
||||
instance = self.monitor_client.autoscale_settings.get(self.resource_group, self.name)
|
||||
result = [auto_scale_to_dict(instance)]
|
||||
except Exception as ex:
|
||||
self.log('Could not get facts for autoscale {0} - {1}.'.format(self.name, str(ex)))
|
||||
return result
|
||||
|
||||
def list_by_resource_group(self):
|
||||
results = []
|
||||
try:
|
||||
response = self.monitor_client.autoscale_settings.list_by_resource_group(self.resource_group)
|
||||
results = [auto_scale_to_dict(item) for item in response if self.has_tags(item.tags, self.tags)]
|
||||
except Exception as ex:
|
||||
self.log('Could not get facts for autoscale {0} - {1}.'.format(self.name, str(ex)))
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMAutoScaleInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,346 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Julien Stroheker, <juliens@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_availabilityset
|
||||
|
||||
version_added: "2.4"
|
||||
|
||||
short_description: Manage Azure Availability Set
|
||||
|
||||
description:
|
||||
- Create, update and delete Azure Availability Set.
|
||||
- An availability set cannot be updated; you will have to recreate one instead.
|
||||
- The only update operation will be for the tags.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the availability set exists or will be created.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the availability set.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the availability set.
|
||||
- Use C(present) to create or update an availability set and C(absent) to delete an availability set.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
location:
|
||||
description:
|
||||
- Valid Azure location. Defaults to location of the resource group.
|
||||
platform_update_domain_count:
|
||||
description:
|
||||
- Update domains indicate groups of virtual machines and underlying physical hardware that can be rebooted at the same time.
|
||||
type: int
|
||||
default: 5
|
||||
platform_fault_domain_count:
|
||||
description:
|
||||
- Fault domains define the group of virtual machines that share a common power source and network switch.
|
||||
- Should be between C(1) and C(3).
|
||||
type: int
|
||||
default: 3
|
||||
sku:
|
||||
description:
|
||||
- Define if the availability set supports managed disks.
|
||||
default: Classic
|
||||
choices:
|
||||
- Classic
|
||||
- Aligned
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Julien Stroheker (@julienstroheker)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create an availability set with default options
|
||||
azure_rm_availabilityset:
|
||||
name: myAvailabilitySet
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Create an availability set with advanced options
|
||||
azure_rm_availabilityset:
|
||||
name: myAvailabilitySet
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
platform_update_domain_count: 5
|
||||
platform_fault_domain_count: 3
|
||||
sku: Aligned
|
||||
|
||||
- name: Delete an availability set
|
||||
azure_rm_availabilityset:
|
||||
name: myAvailabilitySet
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
state:
|
||||
description: Current state of the availability set.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/availabilitySets/myavailabilityset2"
|
||||
location:
|
||||
description:
|
||||
- Location where the resource lives.
|
||||
type: str
|
||||
sample: eastus
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
type: str
|
||||
sample: myavailabilityset2
|
||||
platform_fault_domain_count:
|
||||
description:
|
||||
- Fault domains values.
|
||||
type: int
|
||||
sample: 2
|
||||
platform_update_domain_count:
|
||||
description:
|
||||
- Update domains values.
|
||||
type: int
|
||||
sample: 5
|
||||
sku:
|
||||
description:
|
||||
- The availability set supports managed disks.
|
||||
type: str
|
||||
sample: Aligned
|
||||
tags:
|
||||
description:
|
||||
- Resource tags.
|
||||
type: dict
|
||||
sample: {env: sandbox}
|
||||
|
||||
changed:
|
||||
description: Whether or not the resource has changed.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def availability_set_to_dict(avaset):
|
||||
'''
|
||||
Serializing the availability set from the API to Dict
|
||||
:return: dict
|
||||
'''
|
||||
return dict(
|
||||
id=avaset.id,
|
||||
name=avaset.name,
|
||||
location=avaset.location,
|
||||
platform_update_domain_count=avaset.platform_update_domain_count,
|
||||
platform_fault_domain_count=avaset.platform_fault_domain_count,
|
||||
tags=avaset.tags,
|
||||
sku=avaset.sku.name
|
||||
)
|
||||
|
||||
|
||||
class AzureRMAvailabilitySet(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM availability set resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
platform_update_domain_count=dict(
|
||||
type='int',
|
||||
default=5
|
||||
),
|
||||
platform_fault_domain_count=dict(
|
||||
type='int',
|
||||
default=3
|
||||
),
|
||||
sku=dict(
|
||||
type='str',
|
||||
default='Classic',
|
||||
choices=['Classic', 'Aligned']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.location = None
|
||||
self.tags = None
|
||||
self.platform_update_domain_count = None
|
||||
self.platform_fault_domain_count = None
|
||||
self.sku = None
|
||||
self.state = None
|
||||
self.warning = False
|
||||
|
||||
self.results = dict(changed=False, state=dict())
|
||||
|
||||
super(AzureRMAvailabilitySet, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
resource_group = None
|
||||
response = None
|
||||
to_be_updated = False
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
# Check if the AS is already present in the RG
|
||||
if self.state == 'present':
|
||||
response = self.get_availabilityset()
|
||||
self.results['state'] = response
|
||||
|
||||
if not response:
|
||||
to_be_updated = True
|
||||
else:
|
||||
update_tags, response['tags'] = self.update_tags(response['tags'])
|
||||
|
||||
if update_tags:
|
||||
self.log("Tags has to be updated")
|
||||
to_be_updated = True
|
||||
|
||||
if response['platform_update_domain_count'] != self.platform_update_domain_count:
|
||||
self.faildeploy('platform_update_domain_count')
|
||||
|
||||
if response['platform_fault_domain_count'] != self.platform_fault_domain_count:
|
||||
self.faildeploy('platform_fault_domain_count')
|
||||
|
||||
if response['sku'] != self.sku:
|
||||
self.faildeploy('sku')
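                # The three checks above enforce that only tags can change on an existing
                # availability set; any other difference fails the task via faildeploy().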
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
if to_be_updated:
|
||||
self.results['state'] = self.create_or_update_availabilityset()
|
||||
self.results['changed'] = True
|
||||
|
||||
elif self.state == 'absent':
|
||||
self.delete_availabilityset()
|
||||
self.results['changed'] = True
|
||||
|
||||
return self.results
|
||||
|
||||
def faildeploy(self, param):
|
||||
'''
|
||||
Helper method to push fail message in the console.
|
||||
Useful to notify users that some values of an Availability Set cannot be changed
|
||||
|
||||
:param: variable's name impacted
|
||||
:return: void
|
||||
'''
|
||||
self.fail("You tried to change {0} but is was unsuccessful. An Availability Set is immutable, except tags".format(str(param)))
|
||||
|
||||
def create_or_update_availabilityset(self):
|
||||
'''
|
||||
Method calling the Azure SDK to create or update the AS.
|
||||
:return: dict representing the created or updated availability set
|
||||
'''
|
||||
self.log("Creating availabilityset {0}".format(self.name))
|
||||
try:
|
||||
params_sku = self.compute_models.Sku(
|
||||
name=self.sku
|
||||
)
|
||||
params = self.compute_models.AvailabilitySet(
|
||||
location=self.location,
|
||||
tags=self.tags,
|
||||
platform_update_domain_count=self.platform_update_domain_count,
|
||||
platform_fault_domain_count=self.platform_fault_domain_count,
|
||||
sku=params_sku
|
||||
)
|
||||
response = self.compute_client.availability_sets.create_or_update(self.resource_group, self.name, params)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to create the availability set.')
|
||||
self.fail("Error creating the availability set: {0}".format(str(e)))
|
||||
|
||||
return availability_set_to_dict(response)
|
||||
|
||||
def delete_availabilityset(self):
|
||||
'''
|
||||
Method calling the Azure SDK to delete the AS.
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting availabilityset {0}".format(self.name))
|
||||
try:
|
||||
response = self.compute_client.availability_sets.delete(self.resource_group, self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the availability set.')
|
||||
self.fail("Error deleting the availability set: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_availabilityset(self):
|
||||
'''
|
||||
Method calling the Azure SDK to get an AS.
|
||||
:return: dict of the availability set, or False if it does not exist
|
||||
'''
|
||||
self.log("Checking if the availabilityset {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.compute_client.availability_sets.get(self.resource_group, self.name)
|
||||
found = True
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Availability set.')
|
||||
if found is True:
|
||||
return availability_set_to_dict(response)
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMAvailabilitySet()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,216 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Julien Stroheker <juliens@microsoft.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_availabilityset_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get Azure Availability Set facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific availability set or all availability sets.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Limit results to a specific availability set.
|
||||
resource_group:
|
||||
description:
|
||||
- The resource group to search for the desired availability set.
|
||||
tags:
|
||||
description:
|
||||
- List of tags to be matched.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Julien Stroheker (@julienstroheker)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
- name: Get facts for one availability set
  azure_rm_availabilityset_info:
    name: Testing
    resource_group: myResourceGroup

- name: Get facts for all availability sets in a specific resource group
  azure_rm_availabilityset_info:
    resource_group: myResourceGroup
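
# A hedged sketch (not in the original examples) of the I(tags) filter documented
# above; only availability sets whose tags match the listed entries are returned,
# and the tag values below are illustrative.
- name: Get facts for availability sets matching specific tags
  azure_rm_availabilityset_info:
    resource_group: myResourceGroup
    tags:
      - env:sandbox
      - testing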
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_availabilityset:
|
||||
description: List of availability sets dicts.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
location:
|
||||
description:
|
||||
- Location where the resource lives.
|
||||
type: str
|
||||
sample: eastus2
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
type: str
|
||||
sample: myAvailabilitySet
|
||||
properties:
|
||||
description:
|
||||
- The properties of the resource.
|
||||
type: dict
|
||||
contains:
|
||||
platformFaultDomainCount:
|
||||
description:
|
||||
- Fault Domain count.
|
||||
type: int
|
||||
sample: 3
|
||||
platformUpdateDomainCount:
|
||||
description:
|
||||
- Update Domain count.
|
||||
type: int
|
||||
sample: 2
|
||||
virtualMachines:
|
||||
description:
|
||||
- A list of references to all virtual machines in the availability set.
|
||||
type: list
|
||||
sample: []
|
||||
sku:
|
||||
description:
|
||||
- The availability set SKU, indicating whether it supports managed disks.
|
||||
type: str
|
||||
sample: Aligned
|
||||
type:
|
||||
description:
|
||||
- Resource type.
|
||||
type: str
|
||||
sample: "Microsoft.Compute/availabilitySets"
|
||||
tags:
|
||||
description:
|
||||
- Resource tags.
|
||||
type: dict
|
||||
sample: { env: sandbox }
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
AZURE_OBJECT_CLASS = 'AvailabilitySet'
|
||||
|
||||
|
||||
class AzureRMAvailabilitySetInfo(AzureRMModuleBase):
|
||||
"""Utility class to get availability set facts"""
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_args = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
ansible_info=dict(
|
||||
azure_availabilitysets=[]
|
||||
)
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMAvailabilitySetInfo, self).__init__(
|
||||
derived_arg_spec=self.module_args,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_availabilityset_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_availabilityset_facts' module has been renamed to 'azure_rm_availabilityset_info'", version='2.13')
|
||||
|
||||
for key in self.module_args:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name and not self.resource_group:
|
||||
self.fail("Parameter error: resource group required when filtering by name.")
|
||||
if self.name:
|
||||
self.results['ansible_info']['azure_availabilitysets'] = self.get_item()
|
||||
else:
|
||||
self.results['ansible_info']['azure_availabilitysets'] = self.list_items()
|
||||
|
||||
return self.results
|
||||
|
||||
def get_item(self):
|
||||
"""Get a single availability set"""
|
||||
|
||||
self.log('Get properties for {0}'.format(self.name))
|
||||
|
||||
item = None
|
||||
result = []
|
||||
|
||||
try:
|
||||
item = self.compute_client.availability_sets.get(self.resource_group, self.name)
|
||||
except CloudError:
|
||||
pass
|
||||
|
||||
if item and self.has_tags(item.tags, self.tags):
|
||||
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
|
||||
avase['name'] = item.name
|
||||
avase['type'] = item.type
|
||||
avase['sku'] = item.sku.name
|
||||
result = [avase]
|
||||
|
||||
return result
|
||||
|
||||
def list_items(self):
|
||||
"""Get all availability sets"""
|
||||
|
||||
self.log('List all availability sets')
|
||||
|
||||
try:
|
||||
response = self.compute_client.availability_sets.list(self.resource_group)
|
||||
except CloudError as exc:
|
||||
self.fail('Failed to list all items - {0}'.format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
|
||||
avase['name'] = item.name
|
||||
avase['type'] = item.type
|
||||
avase['sku'] = item.sku.name
|
||||
results.append(avase)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
"""Main module execution code path"""
|
||||
|
||||
AzureRMAvailabilitySetInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,729 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino), Jurijs Fadejevs (@needgithubid)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_azurefirewall
|
||||
version_added: '2.9'
|
||||
short_description: Manage Azure Firewall instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure Firewall.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: true
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the Azure Firewall.
|
||||
required: true
|
||||
type: str
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
type: str
|
||||
application_rule_collections:
|
||||
description:
|
||||
- Collection of application rule collections used by Azure Firewall.
|
||||
type: list
|
||||
suboptions:
|
||||
priority:
|
||||
description:
|
||||
- Priority of the application rule collection resource.
|
||||
type: int
|
||||
action:
|
||||
description:
|
||||
- The action type of a rule collection.
|
||||
choices:
|
||||
- allow
|
||||
- deny
|
||||
type: str
|
||||
rules:
|
||||
description:
|
||||
- Collection of rules used by an application rule collection.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Name of the application rule.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Description of the rule.
|
||||
type: str
|
||||
source_addresses:
|
||||
description:
|
||||
- List of source IP addresses for this rule.
|
||||
type: list
|
||||
protocols:
|
||||
description:
|
||||
- Array of ApplicationRuleProtocols.
|
||||
type: list
|
||||
target_fqdns:
|
||||
description:
|
||||
- List of FQDNs for this rule.
|
||||
type: list
|
||||
fqdn_tags:
|
||||
description:
|
||||
- List of FQDN Tags for this rule.
|
||||
type: list
|
||||
name:
|
||||
description:
|
||||
- Name of the resource that is unique within a resource group.
|
||||
- This name can be used to access the resource.
|
||||
type: str
|
||||
nat_rule_collections:
|
||||
description:
|
||||
- Collection of NAT rule collections used by Azure Firewall.
|
||||
type: list
|
||||
suboptions:
|
||||
priority:
|
||||
description:
|
||||
- Priority of the NAT rule collection resource.
|
||||
type: int
|
||||
action:
|
||||
description:
|
||||
- The action type of a NAT rule collection.
|
||||
choices:
|
||||
- snat
|
||||
- dnat
|
||||
type: str
|
||||
rules:
|
||||
description:
|
||||
- Collection of rules used by a NAT rule collection.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Name of the NAT rule.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Description of the rule.
|
||||
type: str
|
||||
source_addresses:
|
||||
description:
|
||||
- List of source IP addresses for this rule.
|
||||
type: list
|
||||
destination_addresses:
|
||||
description:
|
||||
- List of destination IP addresses for this rule.
|
||||
type: list
|
||||
destination_ports:
|
||||
description:
|
||||
- List of destination ports.
|
||||
type: list
|
||||
protocols:
|
||||
description:
|
||||
- Array of AzureFirewallNetworkRuleProtocols applicable to this NAT rule.
|
||||
type: list
|
||||
translated_address:
|
||||
description:
|
||||
- The translated address for this NAT rule.
|
||||
type: str
|
||||
translated_port:
|
||||
description:
|
||||
- The translated port for this NAT rule.
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the resource that is unique within a resource group.
|
||||
- This name can be used to access the resource.
|
||||
type: str
|
||||
network_rule_collections:
|
||||
description:
|
||||
- Collection of network rule collections used by Azure Firewall.
|
||||
type: list
|
||||
suboptions:
|
||||
priority:
|
||||
description:
|
||||
- Priority of the network rule collection resource.
|
||||
type: int
|
||||
action:
|
||||
description:
|
||||
- The action type of a rule collection.
|
||||
type: str
|
||||
choices:
|
||||
- allow
|
||||
- deny
|
||||
rules:
|
||||
description:
|
||||
- Collection of rules used by a network rule collection.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Name of the network rule.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Description of the rule.
|
||||
type: str
|
||||
protocols:
|
||||
description:
|
||||
- Array of AzureFirewallNetworkRuleProtocols.
|
||||
type: list
|
||||
source_addresses:
|
||||
description:
|
||||
- List of source IP addresses for this rule.
|
||||
type: list
|
||||
destination_addresses:
|
||||
description:
|
||||
- List of destination IP addresses.
|
||||
type: list
|
||||
destination_ports:
|
||||
description:
|
||||
- List of destination ports.
|
||||
type: list
|
||||
name:
|
||||
description:
|
||||
- Name of the resource that is unique within a resource group.
|
||||
- This name can be used to access the resource.
|
||||
type: str
|
||||
ip_configurations:
|
||||
description:
|
||||
- IP configuration of the Azure Firewall resource.
|
||||
type: list
|
||||
suboptions:
|
||||
subnet:
|
||||
description:
|
||||
- Existing subnet.
|
||||
- It can be a string containing subnet resource ID.
|
||||
- It can be a dictionary containing I(name), I(virtual_network_name) and optionally I(resource_group).
|
||||
type: raw
|
||||
public_ip_address:
|
||||
description:
|
||||
- Existing public IP address.
|
||||
- It can be a string containing resource ID.
|
||||
- It can be a string containing a name in current resource group.
|
||||
- It can be a dictionary containing I(name) and optionally I(resource_group).
|
||||
type: raw
|
||||
name:
|
||||
description:
|
||||
- Name of the resource that is unique within a resource group.
|
||||
- This name can be used to access the resource.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the AzureFirewall.
|
||||
- Use C(present) to create or update an AzureFirewall and C(absent) to delete it.
|
||||
default: present
|
||||
type: str
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Jurijs Fadejevs (@needgithubid)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create Azure Firewall
|
||||
azure_rm_azurefirewall:
|
||||
resource_group: myResourceGroup
|
||||
name: myAzureFirewall
|
||||
tags:
|
||||
key1: value1
|
||||
application_rule_collections:
|
||||
- priority: 110
|
||||
action:
|
||||
type: deny
|
||||
rules:
|
||||
- name: rule1
|
||||
description: Deny inbound rule
|
||||
source_addresses:
|
||||
- 216.58.216.164
|
||||
- 10.0.0.0/24
|
||||
protocols:
|
||||
- type: https
|
||||
port: '443'
|
||||
target_fqdns:
|
||||
- www.test.com
|
||||
name: apprulecoll
|
||||
nat_rule_collections:
|
||||
- priority: 112
|
||||
action:
|
||||
type: dnat
|
||||
rules:
|
||||
- name: DNAT-HTTPS-traffic
|
||||
description: D-NAT all outbound web traffic for inspection
|
||||
source_addresses:
|
||||
- '*'
|
||||
destination_addresses:
|
||||
- 1.2.3.4
|
||||
destination_ports:
|
||||
- '443'
|
||||
protocols:
|
||||
- tcp
|
||||
translated_address: 1.2.3.5
|
||||
translated_port: '8443'
|
||||
name: natrulecoll
|
||||
network_rule_collections:
|
||||
- priority: 112
|
||||
action:
|
||||
type: deny
|
||||
rules:
|
||||
- name: L4-traffic
|
||||
description: Block traffic based on source IPs and ports
|
||||
protocols:
|
||||
- tcp
|
||||
source_addresses:
|
||||
- 192.168.1.1-192.168.1.12
|
||||
- 10.1.4.12-10.1.4.255
|
||||
destination_addresses:
|
||||
- '*'
|
||||
destination_ports:
|
||||
- 443-444
|
||||
- '8443'
|
||||
name: netrulecoll
|
||||
ip_configurations:
|
||||
- subnet: >-
|
||||
/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup
|
||||
/providers/Microsoft.Network/virtualNetworks/myVirtualNetwork
|
||||
/subnets/AzureFirewallSubnet
|
||||
public_ip_address: >-
|
||||
/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup
|
||||
/providers/Microsoft.Network/publicIPAddresses/
|
||||
myPublicIpAddress
|
||||
name: azureFirewallIpConfiguration
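
# A hedged sketch (not in the original examples) of the dictionary form for
# I(subnet) and I(public_ip_address) documented above; the resource names are
# placeholders and are assumed to live in the same resource group as the firewall.
- name: Create Azure Firewall referencing the subnet and public IP by name
  azure_rm_azurefirewall:
    resource_group: myResourceGroup
    name: myAzureFirewall
    ip_configurations:
      - subnet:
          virtual_network_name: myVirtualNetwork
          name: AzureFirewallSubnet
        public_ip_address:
          name: myPublicIpAddress
        name: azureFirewallIpConfiguration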
|
||||
- name: Delete Azure Firewall
|
||||
azure_rm_azurefirewall:
|
||||
resource_group: myResourceGroup
|
||||
name: myAzureFirewall
|
||||
state: absent
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/azureFirewalls/myAzureFirewall
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
import re
|
||||
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
from copy import deepcopy
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMAzureFirewalls(AzureRMModuleBaseExt):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
disposition='resource_group_name',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
disposition='azure_firewall_name',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='/',
|
||||
comparison='location'
|
||||
),
|
||||
application_rule_collections=dict(
|
||||
type='list',
|
||||
disposition='/properties/applicationRuleCollections',
|
||||
options=dict(
|
||||
priority=dict(
|
||||
type='int',
|
||||
disposition='properties/*'
|
||||
),
|
||||
action=dict(
|
||||
type='str',
|
||||
choices=['allow',
|
||||
'deny'],
|
||||
disposition='properties/action/type',
|
||||
pattern='camelize'
|
||||
),
|
||||
rules=dict(
|
||||
type='list',
|
||||
disposition='properties/*',
|
||||
options=dict(
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
description=dict(
|
||||
type='str'
|
||||
),
|
||||
source_addresses=dict(
|
||||
type='list',
|
||||
disposition='sourceAddresses'
|
||||
),
|
||||
protocols=dict(
|
||||
type='list',
|
||||
options=dict(
|
||||
type=dict(
|
||||
type='str',
|
||||
disposition='protocolType'
|
||||
),
|
||||
port=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
),
|
||||
target_fqdns=dict(
|
||||
type='list',
|
||||
disposition='targetFqdns'
|
||||
),
|
||||
fqdn_tags=dict(
|
||||
type='list',
|
||||
disposition='fqdnTags'
|
||||
)
|
||||
)
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
),
|
||||
nat_rule_collections=dict(
|
||||
type='list',
|
||||
disposition='/properties/natRuleCollections',
|
||||
options=dict(
|
||||
priority=dict(
|
||||
type='int',
|
||||
disposition='properties/*'
|
||||
),
|
||||
action=dict(
|
||||
type='str',
|
||||
disposition='properties/action/type',
|
||||
choices=['snat',
|
||||
'dnat'],
|
||||
pattern='camelize'
|
||||
),
|
||||
rules=dict(
|
||||
type='list',
|
||||
disposition='properties/*',
|
||||
options=dict(
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
description=dict(
|
||||
type='str'
|
||||
),
|
||||
source_addresses=dict(
|
||||
type='list',
|
||||
disposition='sourceAddresses'
|
||||
),
|
||||
destination_addresses=dict(
|
||||
type='list',
|
||||
disposition='destinationAddresses'
|
||||
),
|
||||
destination_ports=dict(
|
||||
type='list',
|
||||
disposition='destinationPorts'
|
||||
),
|
||||
protocols=dict(
|
||||
type='list'
|
||||
),
|
||||
translated_address=dict(
|
||||
type='str',
|
||||
disposition='translatedAddress'
|
||||
),
|
||||
translated_port=dict(
|
||||
type='str',
|
||||
disposition='translatedPort'
|
||||
)
|
||||
)
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
),
|
||||
network_rule_collections=dict(
|
||||
type='list',
|
||||
disposition='/properties/networkRuleCollections',
|
||||
options=dict(
|
||||
priority=dict(
|
||||
type='int',
|
||||
disposition='properties/*'
|
||||
),
|
||||
action=dict(
|
||||
type='str',
|
||||
choices=['allow',
|
||||
'deny'],
|
||||
disposition='properties/action/type',
|
||||
pattern='camelize'
|
||||
),
|
||||
rules=dict(
|
||||
type='list',
|
||||
disposition='properties/*',
|
||||
options=dict(
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
description=dict(
|
||||
type='str'
|
||||
),
|
||||
protocols=dict(
|
||||
type='list'
|
||||
),
|
||||
source_addresses=dict(
|
||||
type='list',
|
||||
disposition='sourceAddresses'
|
||||
),
|
||||
destination_addresses=dict(
|
||||
type='list',
|
||||
disposition='destinationAddresses'
|
||||
),
|
||||
destination_ports=dict(
|
||||
type='list',
|
||||
disposition='destinationPorts'
|
||||
)
|
||||
)
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
),
|
||||
ip_configurations=dict(
|
||||
type='list',
|
||||
disposition='/properties/ipConfigurations',
|
||||
options=dict(
|
||||
subnet=dict(
|
||||
type='raw',
|
||||
disposition='properties/subnet/id',
|
||||
pattern=('/subscriptions/{subscription_id}/resourceGroups'
|
||||
'/{resource_group}/providers/Microsoft.Network'
|
||||
'/virtualNetworks/{virtual_network_name}/subnets'
|
||||
'/{name}')
|
||||
),
|
||||
public_ip_address=dict(
|
||||
type='raw',
|
||||
disposition='properties/publicIPAddress/id',
|
||||
pattern=('/subscriptions/{subscription_id}/resourceGroups'
|
||||
'/{resource_group}/providers/Microsoft.Network'
|
||||
'/publicIPAddresses/{name}')
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.body = {}
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.url = None
|
||||
self.status_code = [200, 201, 202]
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
self.body = {}
|
||||
self.query_parameters = {}
|
||||
self.query_parameters['api-version'] = '2018-11-01'
|
||||
self.header_parameters = {}
|
||||
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
|
||||
|
||||
super(AzureRMAzureFirewalls, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.body[key] = kwargs[key]
|
||||
|
||||
self.inflate_parameters(self.module_arg_spec, self.body, 0)
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if 'location' not in self.body:
|
||||
self.body['location'] = resource_group.location
|
||||
|
||||
self.url = ('/subscriptions' +
|
||||
'/' + self.subscription_id +
|
||||
'/resourceGroups' +
|
||||
'/' + self.resource_group +
|
||||
'/providers' +
|
||||
'/Microsoft.Network' +
|
||||
'/azureFirewalls' +
|
||||
'/' + self.name)
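        # The assembled path has the form
        # /subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Network/azureFirewalls/{name};
        # GenericRestClient.query() issues it against the ARM endpoint using the
        # api-version set in self.query_parameters above.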
|
||||
|
||||
old_response = self.get_resource()
|
||||
|
||||
if not old_response:
|
||||
self.log("AzureFirewall instance doesn't exist")
|
||||
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log('AzureFirewall instance already exists')
|
||||
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
else:
|
||||
modifiers = {}
|
||||
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
|
||||
self.results['modifiers'] = modifiers
|
||||
self.results['compare'] = []
|
||||
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log('Need to Create / Update the AzureFirewall instance')
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_resource()
|
||||
|
||||
# if not old_response:
|
||||
self.results['changed'] = True
|
||||
# else:
|
||||
# self.results['changed'] = old_response.__ne__(response)
|
||||
self.log('Creation / Update done')
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log('AzureFirewall instance deleted')
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_resource()
|
||||
|
||||
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
|
||||
# for some time after deletion -- this should be really fixed in Azure
|
||||
while self.get_resource():
|
||||
time.sleep(20)
|
||||
else:
|
||||
self.log('AzureFirewall instance unchanged')
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
while response['properties']['provisioningState'] == 'Updating':
|
||||
time.sleep(30)
|
||||
response = self.get_resource()
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_resource(self):
|
||||
# self.log('Creating / Updating the AzureFirewall instance {0}'.format(self.))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'PUT',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
self.body,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the AzureFirewall instance.')
|
||||
self.fail('Error creating the AzureFirewall instance: {0}'.format(str(exc)))
|
||||
|
||||
try:
|
||||
response = json.loads(response.text)
|
||||
except Exception:
|
||||
response = {'text': response.text}
|
||||
|
||||
return response
|
||||
|
||||
def delete_resource(self):
|
||||
# self.log('Deleting the AzureFirewall instance {0}'.format(self.))
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'DELETE',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the AzureFirewall instance.')
|
||||
self.fail('Error deleting the AzureFirewall instance: {0}'.format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_resource(self):
|
||||
# self.log('Checking if the AzureFirewall instance {0} is present'.format(self.))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
response = json.loads(response.text)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
# self.log("AzureFirewall instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the AzureFirewall instance.')
|
||||
if found is True:
|
||||
return response
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMAzureFirewalls()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,275 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Liu Qingyi, (@smile37773)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_azurefirewall_info
|
||||
version_added: '2.9'
|
||||
short_description: Get AzureFirewall info
|
||||
description:
|
||||
- Get info of AzureFirewall.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
author:
|
||||
- Liu Qingyi (@smile37773)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List all Azure Firewalls for a given subscription
|
||||
azure_rm_azurefirewall_info:
|
||||
- name: List all Azure Firewalls for a given resource group
|
||||
azure_rm_azurefirewall_info:
|
||||
resource_group: myResourceGroup
|
||||
- name: Get Azure Firewall
|
||||
azure_rm_azurefirewall_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myAzureFirewall
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
firewalls:
|
||||
description:
|
||||
- A list of dicts with the facts for each matching AzureFirewall.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/
|
||||
myResourceGroup/providers/Microsoft.Network/azureFirewalls/myAzureFirewall"
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "myAzureFirewall"
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "eastus"
|
||||
tags:
|
||||
description:
|
||||
- Resource tags.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { "tag": "value" }
|
||||
etag:
|
||||
description:
|
||||
- Gets a unique read-only string that changes whenever the resource is updated.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
|
||||
nat_rule_collections:
|
||||
description:
|
||||
- Collection of NAT rule collections used by Azure Firewall.
|
||||
type: list
|
||||
network_rule_collections:
|
||||
description:
|
||||
- Collection of network rule collections used by Azure Firewall.
|
||||
type: list
|
||||
ip_configurations:
|
||||
description:
|
||||
- IP configuration of the Azure Firewall resource.
|
||||
type: list
|
||||
provisioning_state:
|
||||
description:
|
||||
- The current provisioning state of the Azure Firewall.
|
||||
type: str
|
||||
sample: "Succeeded"
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
from copy import deepcopy
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMAzureFirewallsInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str'
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.url = None
|
||||
self.status_code = [200]
|
||||
|
||||
self.query_parameters = {}
|
||||
self.query_parameters['api-version'] = '2018-11-01'
|
||||
self.header_parameters = {}
|
||||
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
|
||||
|
||||
self.mgmt_client = None
|
||||
super(AzureRMAzureFirewallsInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if (self.resource_group is not None and self.name is not None):
|
||||
self.results['firewalls'] = self.get()
|
||||
elif (self.resource_group is not None):
|
||||
self.results['firewalls'] = self.list()
|
||||
else:
|
||||
self.results['firewalls'] = self.listall()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Network' +
|
||||
'/azureFirewalls' +
|
||||
'/{{ azure_firewall_name }}')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ azure_firewall_name }}', self.name)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get info for the AzureFirewall instance.')
|
||||
|
||||
return self.format_item(results)
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Network' +
|
||||
'/azureFirewalls')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get info for the AzureFirewall instance.')
|
||||
|
||||
return [self.format_item(x) for x in results['value']] if results.get('value') else []
|
||||
|
||||
def listall(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Network' +
|
||||
'/azureFirewalls')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get info for the AzureFirewall instance.')
|
||||
|
||||
return [self.format_item(x) for x in results['value']] if results.get('value') else []
|
||||
|
||||
def format_item(self, item):
|
||||
d = {
|
||||
'id': item['id'],
|
||||
'name': item['name'],
|
||||
'location': item['location'],
|
||||
'etag': item['etag'],
|
||||
'tags': item.get('tags'),
|
||||
'nat_rule_collections': item['properties']['natRuleCollections'],
|
||||
'network_rule_collections': item['properties']['networkRuleCollections'],
|
||||
'ip_configurations': item['properties']['ipConfigurations'],
|
||||
'provisioning_state': item['properties']['provisioningState']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMAzureFirewallsInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,341 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2019 Junyi Yi (@JunyiYi)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
#
|
||||
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
||||
#
|
||||
# ----------------------------------------------------------------------------
|
||||
#
|
||||
# This file is automatically generated by Magic Modules and manual
|
||||
# changes will be clobbered when the file is regenerated.
|
||||
#
|
||||
#
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_batchaccount
|
||||
version_added: "2.9"
|
||||
short_description: Manages a Batch Account on Azure
|
||||
description:
|
||||
- Create, update and delete instance of Azure Batch Account.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group in which to create the Batch Account.
|
||||
required: true
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the Batch Account.
|
||||
required: true
|
||||
type: str
|
||||
location:
|
||||
description:
|
||||
- Specifies the supported Azure location where the resource exists.
|
||||
type: str
|
||||
auto_storage_account:
|
||||
description:
|
||||
- Existing storage account with which to associate the Batch Account.
|
||||
- It can be the storage account name which is in the same resource group.
|
||||
- It can be the storage account ID. For example "/subscriptions/{subscription_id}/resourceGroups/
|
||||
{resource_group}/providers/Microsoft.Storage/storageAccounts/{name}".
|
||||
- It can be a dict which contains I(name) and I(resource_group) of the storage account.
|
||||
key_vault:
|
||||
description:
|
||||
- Existing key vault with which to associate the Batch Account.
|
||||
- It can be the key vault name which is in the same resource group.
|
||||
- It can be the key vault ID. For example "/subscriptions/{subscription_id}/resourceGroups/
|
||||
{resource_group}/providers/Microsoft.KeyVault/vaults/{name}".
|
||||
- It can be a dict which contains I(name) and I(resource_group) of the key vault.
|
||||
pool_allocation_mode:
|
||||
description:
|
||||
- The pool allocation mode of the Batch Account.
|
||||
default: batch_service
|
||||
choices:
|
||||
- batch_service
|
||||
- user_subscription
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Batch Account.
|
||||
- Use C(present) to create or update a Batch Account and C(absent) to delete it.
|
||||
default: present
|
||||
type: str
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Junyi Yi (@JunyiYi)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
- name: Create Batch Account
  azure_rm_batchaccount:
    resource_group: MyResGroup
    name: mybatchaccount
    location: eastus
    auto_storage_account:
      name: mystorageaccountname
    pool_allocation_mode: batch_service
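
# A hedged additional sketch: the dict form of I(auto_storage_account) and I(key_vault)
# described in the options above; the account, vault and resource group names are placeholders.
- name: Create Batch Account with storage account and key vault by reference
  azure_rm_batchaccount:
    resource_group: MyResGroup
    name: mybatchaccount
    location: eastus
    auto_storage_account:
      name: mystorageaccountname
      resource_group: MyStorageResGroup
    key_vault:
      name: mykeyvault
      resource_group: MyKeyVaultResGroup
    pool_allocation_mode: user_subscription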
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The ID of the Batch account.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Batch/batchAccounts/sampleacct"
|
||||
account_endpoint:
|
||||
description:
|
||||
- The account endpoint used to interact with the Batch service.
|
||||
returned: always
|
||||
type: str
|
||||
sample: sampleacct.westus.batch.azure.com
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import normalize_location_name
|
||||
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from msrest.serialization import Model
|
||||
from azure.mgmt.batch import BatchManagementClient
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMBatchAccount(AzureRMModuleBaseExt):
|
||||
"""Configuration class for an Azure RM Batch Account resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
required=True,
|
||||
type='str'
|
||||
),
|
||||
name=dict(
|
||||
required=True,
|
||||
type='str'
|
||||
),
|
||||
location=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='/'
|
||||
),
|
||||
auto_storage_account=dict(
|
||||
type='raw'
|
||||
),
|
||||
key_vault=dict(
|
||||
type='raw',
|
||||
updatable=False,
|
||||
disposition='/'
|
||||
),
|
||||
pool_allocation_mode=dict(
|
||||
default='batch_service',
|
||||
type='str',
|
||||
choices=['batch_service', 'user_subscription'],
|
||||
updatable=False,
|
||||
disposition='/'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.batch_account = dict()
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMBatchAccount, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.batch_account[key] = kwargs[key]
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if self.batch_account.get('location') is None:
|
||||
self.batch_account['location'] = resource_group.location
|
||||
if self.batch_account.get('auto_storage_account') is not None:
|
||||
self.batch_account['auto_storage'] = {
|
||||
'storage_account_id': self.normalize_resource_id(
|
||||
self.batch_account.pop('auto_storage_account'),
|
||||
'/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Storage/storageAccounts/{name}')
|
||||
}
|
||||
if self.batch_account.get('key_vault') is not None:
|
||||
id = self.normalize_resource_id(
|
||||
self.batch_account.pop('key_vault'),
|
||||
'/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.KeyVault/vaults/{name}')
|
||||
url = 'https://' + id.split('/').pop() + '.vault.azure.net/'
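            # Build the vault's DNS endpoint from the last segment of its resource ID,
            # e.g. .../vaults/mykeyvault becomes https://mykeyvault.vault.azure.net/.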
|
||||
self.batch_account['key_vault_reference'] = {
|
||||
'id': id,
|
||||
'url': url
|
||||
}
|
||||
self.batch_account['pool_allocation_mode'] = _snake_to_camel(self.batch_account['pool_allocation_mode'], True)
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(BatchManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
old_response = self.get_batchaccount()
|
||||
|
||||
if not old_response:
|
||||
self.log("Batch Account instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Batch Account instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.results['old'] = old_response
|
||||
self.results['new'] = self.batch_account
|
||||
if not self.idempotency_check(old_response, self.batch_account):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Batch Account instance")
|
||||
|
||||
self.results['changed'] = True
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
response = self.create_update_batchaccount()
|
||||
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Batch Account instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_batchaccount()
|
||||
else:
|
||||
self.log("Batch Account instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None),
|
||||
'account_endpoint': response.get('account_endpoint', None)
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_batchaccount(self):
|
||||
'''
|
||||
Creates or updates Batch Account with the specified configuration.
|
||||
|
||||
:return: deserialized Batch Account instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Batch Account instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
if self.to_do == Actions.Create:
|
||||
response = self.mgmt_client.batch_account.create(resource_group_name=self.resource_group,
|
||||
account_name=self.name,
|
||||
parameters=self.batch_account)
|
||||
else:
|
||||
response = self.mgmt_client.batch_account.update(resource_group_name=self.resource_group,
|
||||
account_name=self.name,
|
||||
tags=self.tags,
|
||||
auto_storage=self.batch_account.get('auto_storage'))
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Batch Account instance.')
|
||||
self.fail("Error creating the Batch Account instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_batchaccount(self):
|
||||
'''
|
||||
Deletes specified Batch Account instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Batch Account instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.batch_account.delete(resource_group_name=self.resource_group,
|
||||
account_name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Batch Account instance.')
|
||||
self.fail("Error deleting the Batch Account instance: {0}".format(str(e)))
|
||||
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
return True
|
||||
|
||||
def get_batchaccount(self):
|
||||
'''
|
||||
Gets the properties of the specified Batch Account
|
||||
:return: deserialized Batch Account instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Batch Account instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.batch_account.get(resource_group_name=self.resource_group,
|
||||
account_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Batch Account instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Batch Account instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMBatchAccount()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,666 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_cdnendpoint
|
||||
version_added: "2.8"
|
||||
short_description: Manage an Azure CDN endpoint
|
||||
description:
|
||||
- Create, update, start, stop and delete an Azure CDN endpoint.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the Azure CDN endpoint exists or will be created.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the Azure CDN endpoint.
|
||||
required: true
|
||||
location:
|
||||
description:
|
||||
- Valid Azure location. Defaults to the location of the resource group.
|
||||
started:
|
||||
description:
|
||||
- Use with I(state=present) to start the endpoint.
|
||||
type: bool
|
||||
purge:
|
||||
description:
|
||||
- Use with I(state=present) to purge the endpoint.
|
||||
type: bool
|
||||
default: false
|
||||
purge_content_paths:
|
||||
description:
|
||||
- Use with I(state=present) and I(purge=true) to specify content paths to be purged.
|
||||
type: list
|
||||
default: ['/']
|
||||
profile_name:
|
||||
description:
|
||||
- Name of the CDN profile to which the endpoint is attached.
|
||||
required: true
|
||||
origins:
|
||||
description:
|
||||
- Set of sources of the content being delivered via CDN.
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Origin name.
|
||||
required: true
|
||||
host_name:
|
||||
description:
|
||||
- The address of the origin.
|
||||
- It can be a domain name, IPv4 address, or IPv6 address.
|
||||
required: true
|
||||
http_port:
|
||||
description:
|
||||
- The value of the HTTP port. Must be between C(1) and C(65535).
|
||||
type: int
|
||||
https_port:
|
||||
description:
|
||||
- The value of the HTTPS port. Must be between C(1) and C(65535).
|
||||
type: int
|
||||
required: true
|
||||
origin_host_header:
|
||||
description:
|
||||
- The host header value sent to the origin with each request.
|
||||
type: str
|
||||
origin_path:
|
||||
description:
|
||||
- A directory path on the origin that CDN can use to retrieve content from.
|
||||
- For example, contoso.cloudapp.net/originpath.
|
||||
type: str
|
||||
content_types_to_compress:
|
||||
description:
|
||||
- List of content types on which compression applies.
|
||||
- This value should be a valid MIME type.
|
||||
type: list
|
||||
is_compression_enabled:
|
||||
description:
|
||||
- Indicates whether content compression is enabled on CDN.
|
||||
type: bool
|
||||
default: false
|
||||
is_http_allowed:
|
||||
description:
|
||||
- Indicates whether HTTP traffic is allowed on the endpoint.
|
||||
type: bool
|
||||
default: true
|
||||
is_https_allowed:
|
||||
description:
|
||||
- Indicates whether HTTPS traffic is allowed on the endpoint.
|
||||
type: bool
|
||||
default: true
|
||||
query_string_caching_behavior:
|
||||
description:
|
||||
- Defines how CDN caches requests that include query strings.
|
||||
type: str
|
||||
choices:
|
||||
- ignore_query_string
|
||||
- bypass_caching
|
||||
- use_query_string
|
||||
- not_set
|
||||
default: ignore_query_string
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Azure CDN endpoint. Use C(present) to create or update an Azure CDN endpoint and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yunge Zhu (@yungezz)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
- name: Create an Azure CDN endpoint
  azure_rm_cdnendpoint:
    resource_group: myResourceGroup
    profile_name: myProfile
    name: myEndpoint
    origins:
      - name: TestOrig
        host_name: "www.example.com"
    tags:
      testing: testing
      delete: on-exit
      foo: bar
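
# A hedged sketch (not in the original examples) combining the compression and
# query-string caching options documented above; the MIME types listed are illustrative.
- name: Create an Azure CDN endpoint with compression enabled
  azure_rm_cdnendpoint:
    resource_group: myResourceGroup
    profile_name: myProfile
    name: myEndpoint
    origins:
      - name: TestOrig
        host_name: "www.example.com"
    is_compression_enabled: true
    content_types_to_compress:
      - text/html
      - application/json
    query_string_caching_behavior: ignore_query_string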

- name: Delete an Azure CDN endpoint
  azure_rm_cdnendpoint:
    resource_group: myResourceGroup
    profile_name: myProfile
    name: myEndpoint
    state: absent
'''
|
||||
RETURN = '''
|
||||
state:
|
||||
description: Current state of the Azure CDN endpoint.
|
||||
returned: always
|
||||
type: str
|
||||
id:
|
||||
description:
|
||||
- Id of the CDN endpoint.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myProfile/endpoints/
|
||||
myEndpoint"
|
||||
host_name:
|
||||
description:
|
||||
- Host name of the CDN endpoint.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "myendpoint.azureedge.net"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from azure.mgmt.cdn.models import Endpoint, DeepCreatedOrigin, EndpointUpdateParameters, QueryStringCachingBehavior, ErrorResponseException
|
||||
from azure.mgmt.cdn import CdnManagementClient
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def cdnendpoint_to_dict(cdnendpoint):
|
||||
return dict(
|
||||
id=cdnendpoint.id,
|
||||
name=cdnendpoint.name,
|
||||
type=cdnendpoint.type,
|
||||
location=cdnendpoint.location,
|
||||
tags=cdnendpoint.tags,
|
||||
origin_host_header=cdnendpoint.origin_host_header,
|
||||
origin_path=cdnendpoint.origin_path,
|
||||
content_types_to_compress=cdnendpoint.content_types_to_compress,
|
||||
is_compression_enabled=cdnendpoint.is_compression_enabled,
|
||||
is_http_allowed=cdnendpoint.is_http_allowed,
|
||||
is_https_allowed=cdnendpoint.is_https_allowed,
|
||||
query_string_caching_behavior=cdnendpoint.query_string_caching_behavior,
|
||||
optimization_type=cdnendpoint.optimization_type,
|
||||
probe_path=cdnendpoint.probe_path,
|
||||
geo_filters=[geo_filter_to_dict(geo_filter) for geo_filter in cdnendpoint.geo_filters] if cdnendpoint.geo_filters else None,
|
||||
host_name=cdnendpoint.host_name,
|
||||
origins=[deep_created_origin_to_dict(origin) for origin in cdnendpoint.origins] if cdnendpoint.origins else None,
|
||||
resource_state=cdnendpoint.resource_state,
|
||||
provisioning_state=cdnendpoint.provisioning_state
|
||||
)
|
||||
|
||||
|
||||
def deep_created_origin_to_dict(origin):
|
||||
return dict(
|
||||
name=origin.name,
|
||||
host_name=origin.host_name,
|
||||
http_port=origin.http_port,
|
||||
https_port=origin.https_port,
|
||||
)
|
||||
|
||||
|
||||
def geo_filter_to_dict(geo_filter):
|
||||
return dict(
|
||||
relative_path=geo_filter.relative_path,
|
||||
action=geo_filter.action,
|
||||
country_codes=geo_filter.country_codes,
|
||||
)
|
||||
|
||||
|
||||
def default_content_types():
|
||||
return ["text/plain",
|
||||
"text/html",
|
||||
"text/css",
|
||||
"text/javascript",
|
||||
"application/x-javascript",
|
||||
"application/javascript",
|
||||
"application/json",
|
||||
"application/xml"]
|
||||
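# Editorial note: these defaults are only used further down, when compression is enabled but the
# user supplied no content_types_to_compress; an explicit list always wins. A minimal sketch of
# that fallback (the same expression appears in create/update below):
#   types = default_content_types() if is_compression_enabled and not content_types_to_compress else content_types_to_compress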
|
||||
|
||||
origin_spec = dict(
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
host_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
http_port=dict(
|
||||
type='int'
|
||||
),
|
||||
https_port=dict(
|
||||
type='int'
|
||||
)
|
||||
)
|
||||
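# Illustrative origins item accepted by origin_spec (placeholder values; the ports are optional):
#   {'name': 'myOrigin', 'host_name': 'www.example.com', 'http_port': 80, 'https_port': 443}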
|
||||
|
||||
class AzureRMCdnendpoint(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
started=dict(
|
||||
type='bool'
|
||||
),
|
||||
purge=dict(
|
||||
type='bool'
|
||||
),
|
||||
purge_content_paths=dict(
|
||||
type='list',
|
||||
elements='str',
|
||||
default=['/']
|
||||
),
|
||||
profile_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
origins=dict(
|
||||
type='list',
|
||||
elements='dict',
|
||||
options=origin_spec
|
||||
),
|
||||
origin_host_header=dict(
|
||||
type='str',
|
||||
),
|
||||
origin_path=dict(
|
||||
type='str',
|
||||
),
|
||||
content_types_to_compress=dict(
|
||||
type='list',
|
||||
elements='str',
|
||||
),
|
||||
is_compression_enabled=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
is_http_allowed=dict(
|
||||
type='bool',
|
||||
default=True
|
||||
),
|
||||
is_https_allowed=dict(
|
||||
type='bool',
|
||||
default=True
|
||||
),
|
||||
query_string_caching_behavior=dict(
|
||||
type='str',
|
||||
choices=[
|
||||
'ignore_query_string',
|
||||
'bypass_caching',
|
||||
'use_query_string',
|
||||
'not_set'
|
||||
],
|
||||
default='ignore_query_string'
|
||||
),
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.started = None
|
||||
self.purge = None
|
||||
self.purge_content_paths = None
|
||||
self.location = None
|
||||
self.profile_name = None
|
||||
self.origins = None
|
||||
self.tags = None
|
||||
self.origin_host_header = None
|
||||
self.origin_path = None
|
||||
self.content_types_to_compress = None
|
||||
self.is_compression_enabled = None
|
||||
self.is_http_allowed = None
|
||||
self.is_https_allowed = None
|
||||
self.query_string_caching_behavior = None
|
||||
|
||||
self.cdn_client = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
|
||||
super(AzureRMCdnendpoint, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.cdn_client = self.get_cdn_client()
|
||||
|
||||
to_be_updated = False
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
if self.query_string_caching_behavior:
|
||||
self.query_string_caching_behavior = _snake_to_camel(self.query_string_caching_behavior)
|
||||
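# Sketch of the expected conversion (assuming _snake_to_camel's default capitalize_first=False):
#   'ignore_query_string' -> 'ignoreQueryString'
#   'bypass_caching'      -> 'bypassCaching'
# check_update() later compares this value case-insensitively against the service response.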
|
||||
response = self.get_cdnendpoint()
|
||||
|
||||
if self.state == 'present':
|
||||
|
||||
if not response:
|
||||
|
||||
if self.started is None:
|
||||
# If endpoint doesn't exist and no start/stop operation specified, create endpoint.
|
||||
if self.origins is None:
|
||||
self.fail("Origins is not provided when trying to create endpoint")
|
||||
self.log("Need to create the Azure CDN endpoint")
|
||||
|
||||
if not self.check_mode:
|
||||
result = self.create_cdnendpoint()
|
||||
self.results['id'] = result['id']
|
||||
self.results['host_name'] = result['host_name']
|
||||
self.log("Creation done")
|
||||
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
else:
|
||||
# Fail the module when the user tries to start/stop a non-existent endpoint
|
||||
self.log("Can't stop/stop a non-existed endpoint")
|
||||
self.fail("This endpoint is not found, stop/start is forbidden")
|
||||
|
||||
else:
|
||||
self.log('Results : {0}'.format(response))
|
||||
self.results['id'] = response['id']
|
||||
self.results['host_name'] = response['host_name']
|
||||
|
||||
update_tags, response['tags'] = self.update_tags(response['tags'])
|
||||
|
||||
if update_tags:
|
||||
to_be_updated = True
|
||||
|
||||
if response['provisioning_state'] == "Succeeded":
|
||||
if self.started is False and response['resource_state'] == 'Running':
|
||||
self.log("Need to stop the Azure CDN endpoint")
|
||||
|
||||
if not self.check_mode:
|
||||
result = self.stop_cdnendpoint()
|
||||
self.log("Endpoint stopped")
|
||||
|
||||
self.results['changed'] = True
|
||||
|
||||
elif self.started and response['resource_state'] == 'Stopped':
|
||||
self.log("Need to start the Azure CDN endpoint")
|
||||
|
||||
if not self.check_mode:
|
||||
result = self.start_cdnendpoint()
|
||||
self.log("Endpoint started")
|
||||
|
||||
self.results['changed'] = True
|
||||
|
||||
elif self.started is not None:
|
||||
self.module.warn("Start/Stop not performed due to current resource state {0}".format(response['resource_state']))
|
||||
self.results['changed'] = False
|
||||
|
||||
if self.purge:
|
||||
self.log("Need to purge endpoint")
|
||||
|
||||
if not self.check_mode:
|
||||
result = self.purge_cdnendpoint()
|
||||
self.log("Endpoint purged")
|
||||
|
||||
self.results['changed'] = True
|
||||
|
||||
to_be_updated = to_be_updated or self.check_update(response)
|
||||
|
||||
if to_be_updated:
|
||||
self.log("Need to update the Azure CDN endpoint")
|
||||
self.results['changed'] = True
|
||||
|
||||
if not self.check_mode:
|
||||
result = self.update_cdnendpoint()
|
||||
self.results['host_name'] = result['host_name']
|
||||
self.log("Update done")
|
||||
|
||||
elif self.started is not None:
|
||||
self.module.warn("Start/Stop not performed due to current provisioning state {0}".format(response['provisioning_state']))
|
||||
self.results['changed'] = False
|
||||
|
||||
elif self.state == 'absent' and response:
|
||||
self.log("Need to delete the Azure CDN endpoint")
|
||||
self.results['changed'] = True
|
||||
|
||||
if not self.check_mode:
|
||||
self.delete_cdnendpoint()
|
||||
self.log("Azure CDN endpoint deleted")
|
||||
|
||||
return self.results
|
||||
|
||||
def create_cdnendpoint(self):
|
||||
'''
|
||||
Creates an Azure CDN endpoint.
|
||||
|
||||
:return: deserialized Azure CDN endpoint instance state dictionary
|
||||
'''
|
||||
self.log("Creating the Azure CDN endpoint instance {0}".format(self.name))
|
||||
|
||||
origins = []
|
||||
for item in self.origins:
|
||||
origins.append(
|
||||
DeepCreatedOrigin(name=item['name'],
|
||||
host_name=item['host_name'],
|
||||
http_port=item['http_port'] if 'http_port' in item else None,
|
||||
https_port=item['https_port'] if 'https_port' in item else None)
|
||||
)
|
||||
|
||||
parameters = Endpoint(
|
||||
origins=origins,
|
||||
location=self.location,
|
||||
tags=self.tags,
|
||||
origin_host_header=self.origin_host_header,
|
||||
origin_path=self.origin_path,
|
||||
content_types_to_compress=default_content_types() if self.is_compression_enabled and not self.content_types_to_compress
|
||||
else self.content_types_to_compress,
|
||||
is_compression_enabled=self.is_compression_enabled if self.is_compression_enabled is not None else False,
|
||||
is_http_allowed=self.is_http_allowed if self.is_http_allowed is not None else True,
|
||||
is_https_allowed=self.is_https_allowed if self.is_https_allowed is not None else True,
|
||||
query_string_caching_behavior=self.query_string_caching_behavior if self.query_string_caching_behavior
|
||||
else QueryStringCachingBehavior.ignore_query_string
|
||||
)
|
||||
|
||||
try:
|
||||
poller = self.cdn_client.endpoints.create(self.resource_group, self.profile_name, self.name, parameters)
|
||||
response = self.get_poller_result(poller)
|
||||
return cdnendpoint_to_dict(response)
|
||||
except ErrorResponseException as exc:
|
||||
self.log('Error attempting to create Azure CDN endpoint instance.')
|
||||
self.fail("Error creating Azure CDN endpoint instance: {0}".format(exc.message))
|
||||
|
||||
def update_cdnendpoint(self):
|
||||
'''
|
||||
Updates an Azure CDN endpoint.
|
||||
|
||||
:return: deserialized Azure CDN endpoint instance state dictionary
|
||||
'''
|
||||
self.log("Updating the Azure CDN endpoint instance {0}".format(self.name))
|
||||
|
||||
endpoint_update_properties = EndpointUpdateParameters(
|
||||
tags=self.tags,
|
||||
origin_host_header=self.origin_host_header,
|
||||
origin_path=self.origin_path,
|
||||
content_types_to_compress=default_content_types() if self.is_compression_enabled and not self.content_types_to_compress
|
||||
else self.content_types_to_compress,
|
||||
is_compression_enabled=self.is_compression_enabled,
|
||||
is_http_allowed=self.is_http_allowed,
|
||||
is_https_allowed=self.is_https_allowed,
|
||||
query_string_caching_behavior=self.query_string_caching_behavior,
|
||||
)
|
||||
|
||||
try:
|
||||
poller = self.cdn_client.endpoints.update(self.resource_group, self.profile_name, self.name, endpoint_update_properties)
|
||||
response = self.get_poller_result(poller)
|
||||
return cdnendpoint_to_dict(response)
|
||||
except ErrorResponseException as exc:
|
||||
self.log('Error attempting to update Azure CDN endpoint instance.')
|
||||
self.fail("Error updating Azure CDN endpoint instance: {0}".format(exc.message))
|
||||
|
||||
def delete_cdnendpoint(self):
|
||||
'''
|
||||
Deletes the specified Azure CDN endpoint in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Azure CDN endpoint {0}".format(self.name))
|
||||
try:
|
||||
poller = self.cdn_client.endpoints.delete(
|
||||
self.resource_group, self.profile_name, self.name)
|
||||
self.get_poller_result(poller)
|
||||
return True
|
||||
except ErrorResponseException as e:
|
||||
self.log('Error attempting to delete the Azure CDN endpoint.')
|
||||
self.fail("Error deleting the Azure CDN endpoint: {0}".format(e.message))
|
||||
return False
|
||||
|
||||
def get_cdnendpoint(self):
|
||||
'''
|
||||
Gets the properties of the specified Azure CDN endpoint.
|
||||
|
||||
:return: deserialized Azure CDN endpoint state dictionary
|
||||
'''
|
||||
self.log(
|
||||
"Checking if the Azure CDN endpoint {0} is present".format(self.name))
|
||||
try:
|
||||
response = self.cdn_client.endpoints.get(self.resource_group, self.profile_name, self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Azure CDN endpoint : {0} found".format(response.name))
|
||||
return cdnendpoint_to_dict(response)
|
||||
except ErrorResponseException:
|
||||
self.log('Did not find the Azure CDN endpoint.')
|
||||
return False
|
||||
|
||||
def start_cdnendpoint(self):
|
||||
'''
|
||||
Starts an existing Azure CDN endpoint that is in a stopped state.
|
||||
|
||||
:return: deserialized Azure CDN endpoint state dictionary
|
||||
'''
|
||||
self.log(
|
||||
"Starting the Azure CDN endpoint {0}".format(self.name))
|
||||
try:
|
||||
poller = self.cdn_client.endpoints.start(self.resource_group, self.profile_name, self.name)
|
||||
response = self.get_poller_result(poller)
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Azure CDN endpoint : {0} started".format(response.name))
|
||||
return self.get_cdnendpoint()
|
||||
except ErrorResponseException:
|
||||
self.log('Failed to start the Azure CDN endpoint.')
|
||||
return False
|
||||
|
||||
def purge_cdnendpoint(self):
|
||||
'''
|
||||
Purges an existing Azure CDN endpoint.
|
||||
|
||||
:return: deserialized Azure CDN endpoint state dictionary
|
||||
'''
|
||||
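# Illustrative content paths (placeholders; wildcard support varies by CDN SKU):
#   purge_content_paths: ['/pictures/logo.png', '/pictures/*']
# The module default ['/'] is passed through to the service unchanged.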
self.log(
|
||||
"Purging the Azure CDN endpoint {0}".format(self.name))
|
||||
try:
|
||||
poller = self.cdn_client.endpoints.purge_content(self.resource_group,
|
||||
self.profile_name,
|
||||
self.name,
|
||||
content_paths=self.purge_content_paths)
|
||||
response = self.get_poller_result(poller)
|
||||
self.log("Response : {0}".format(response))
|
||||
return self.get_cdnendpoint()
|
||||
except ErrorResponseException:
|
||||
self.log('Failed to purge the Azure CDN endpoint.')
|
||||
return False
|
||||
|
||||
def stop_cdnendpoint(self):
|
||||
'''
|
||||
Stops an existing Azure CDN endpoint that is in a running state.
|
||||
|
||||
:return: deserialized Azure CDN endpoint state dictionary
|
||||
'''
|
||||
self.log(
|
||||
"Stopping the Azure CDN endpoint {0}".format(self.name))
|
||||
try:
|
||||
poller = self.cdn_client.endpoints.stop(self.resource_group, self.profile_name, self.name)
|
||||
response = self.get_poller_result(poller)
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Azure CDN endpoint : {0} stopped".format(response.name))
|
||||
return self.get_cdnendpoint()
|
||||
except ErrorResponseException:
|
||||
self.log('Failed to stop the Azure CDN endpoint.')
|
||||
return False
|
||||
|
||||
def check_update(self, response):
|
||||
|
||||
if self.origin_host_header and response['origin_host_header'] != self.origin_host_header:
|
||||
self.log("Origin host header Diff - Origin {0} / Update {1}".format(response['origin_host_header'], self.origin_host_header))
|
||||
return True
|
||||
|
||||
if self.origin_path and response['origin_path'] != self.origin_path:
|
||||
self.log("Origin path Diff - Origin {0} / Update {1}".format(response['origin_path'], self.origin_path))
|
||||
return True
|
||||
|
||||
if self.content_types_to_compress and response['content_types_to_compress'] != self.content_types_to_compress:
|
||||
self.log("Content types to compress Diff - Origin {0} / Update {1}".format(response['content_types_to_compress'], self.content_types_to_compress))
|
||||
return True
|
||||
|
||||
if self.is_compression_enabled is not None and response['is_compression_enabled'] != self.is_compression_enabled:
|
||||
self.log("is_compression_enabled Diff - Origin {0} / Update {1}".format(response['is_compression_enabled'], self.is_compression_enabled))
|
||||
return True
|
||||
|
||||
if self.is_http_allowed is not None and response['is_http_allowed'] != self.is_http_allowed:
|
||||
self.log("is_http_allowed Diff - Origin {0} / Update {1}".format(response['is_http_allowed'], self.is_http_allowed))
|
||||
return True
|
||||
|
||||
if self.is_https_allowed is not None and response['is_https_allowed'] != self.is_https_allowed:
|
||||
self.log("is_https_allowed Diff - Origin {0} / Update {1}".format(response['is_https_allowed'], self.is_https_allowed))
|
||||
return True
|
||||
|
||||
if self.query_string_caching_behavior and \
|
||||
_snake_to_camel(response['query_string_caching_behavior']).lower() != _snake_to_camel(self.query_string_caching_behavior).lower():
|
||||
self.log("query_string_caching_behavior Diff - Origin {0} / Update {1}".format(response['query_string_caching_behavior'],
|
||||
self.query_string_caching_behavior))
|
||||
return True
|
||||
|
||||
return False
|
||||
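# Editorial note: check_update() compares the scalar endpoint properties and relies on
# update_tags() for tags; the origins list is not diffed, so changes to origins are neither
# detected nor applied by this module's update path.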
|
||||
def get_cdn_client(self):
|
||||
if not self.cdn_client:
|
||||
self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version='2017-04-02')
|
||||
return self.cdn_client
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMCdnendpoint()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,315 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Hai Cao, <t-haicao@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_cdnendpoint_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get Azure CDN endpoint facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific Azure CDN endpoint or all Azure CDN endpoints.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group to which this CDN profile belongs.
|
||||
required: true
|
||||
profile_name:
|
||||
description:
|
||||
- Name of CDN profile.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Limit results to a specific Azure CDN endpoint.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Hai Cao (@caohai)
|
||||
- Yunge Zhu (@yungezz)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for all endpoints in CDN profile
|
||||
azure_rm_cdnendpoint_info:
|
||||
resource_group: myResourceGroup
|
||||
profile_name: myCDNProfile
|
||||
|
||||
- name: Get facts of specific CDN endpoint
|
||||
azure_rm_cdnendpoint_info:
|
||||
resource_group: myResourceGroup
|
||||
profile_name: myCDNProfile
|
||||
name: myEndpoint1
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
cdnendpoints:
|
||||
description: List of Azure CDN endpoints.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the Azure CDN endpoint exists.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
name:
|
||||
description:
|
||||
- Name of the Azure CDN endpoint.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myEndpoint
|
||||
profile_name:
|
||||
description:
|
||||
- Name of the Azure CDN profile that this endpoint is attached to.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myProfile
|
||||
location:
|
||||
description:
|
||||
- Location of the Azure CDN endpoint.
|
||||
type: str
|
||||
sample: WestUS
|
||||
id:
|
||||
description:
|
||||
- ID of the Azure CDN endpoint.
|
||||
type: str
|
||||
sample:
|
||||
"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myCDN/providers/Microsoft.Cdn/profiles/myProfile/endpoints/myEndpoint1"
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning status of the Azure CDN endpoint.
|
||||
type: str
|
||||
sample: Succeeded
|
||||
resource_state:
|
||||
description:
|
||||
- Resource status of the profile.
|
||||
type: str
|
||||
sample: Running
|
||||
is_compression_enabled:
|
||||
description:
|
||||
- Indicates whether content compression is enabled on CDN.
|
||||
type: bool
|
||||
sample: true
|
||||
is_http_allowed:
|
||||
description:
|
||||
- Indicates whether HTTP traffic is allowed on the endpoint.
|
||||
type: bool
|
||||
sample: true
|
||||
is_https_allowed:
|
||||
description:
|
||||
- Indicates whether HTTPS traffic is allowed on the endpoint.
|
||||
type: bool
|
||||
sample: true
|
||||
query_string_caching_behavior:
|
||||
description:
|
||||
- Defines how CDN caches requests that include query strings.
|
||||
type: str
|
||||
sample: IgnoreQueryString
|
||||
content_types_to_compress:
|
||||
description:
|
||||
- List of content types on which compression applies.
|
||||
type: list
|
||||
sample: [
|
||||
"text/plain",
|
||||
"text/html",
|
||||
"text/css",
|
||||
"text/javascript",
|
||||
"application/x-javascript",
|
||||
"application/javascript",
|
||||
"application/json",
|
||||
"application/xml"
|
||||
]
|
||||
origins:
|
||||
description:
|
||||
- The source of the content being delivered via CDN.
|
||||
sample: {
|
||||
"host_name": "xxxxxxxx.blob.core.windows.net",
|
||||
"http_port": null,
|
||||
"https_port": null,
|
||||
"name": "xxxxxxxx-blob-core-windows-net"
|
||||
}
|
||||
origin_host_header:
|
||||
description:
|
||||
- The host header value sent to the origin with each request.
|
||||
type: str
|
||||
sample: xxxxxxxx.blob.core.windows.net
|
||||
origin_path:
|
||||
description:
|
||||
- A directory path on the origin that CDN can use to retrieve content from.
|
||||
type: str
|
||||
sample: /pic/
|
||||
tags:
|
||||
description:
|
||||
- The tags of the Azure CDN endpoint.
|
||||
type: list
|
||||
sample: foo
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.mgmt.cdn import CdnManagementClient
|
||||
from azure.mgmt.cdn.models import ErrorResponseException
|
||||
from azure.common import AzureHttpError
|
||||
except ImportError:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
import re
|
||||
|
||||
AZURE_OBJECT_CLASS = 'endpoints'
|
||||
|
||||
|
||||
class AzureRMCdnEndpointInfo(AzureRMModuleBase):
|
||||
"""Utility class to get Azure Azure CDN endpoint facts"""
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_args = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
profile_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
cdnendpoints=[]
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.profile_name = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMCdnEndpointInfo, self).__init__(
|
||||
derived_arg_spec=self.module_args,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_cdnendpoint_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_cdnendpoint_facts' module has been renamed to 'azure_rm_cdnendpoint_info'", version='2.13')
|
||||
|
||||
for key in self.module_args:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version='2017-04-02')
|
||||
|
||||
if self.name:
|
||||
self.results['cdnendpoints'] = self.get_item()
|
||||
else:
|
||||
self.results['cdnendpoints'] = self.list_by_profile()
|
||||
|
||||
return self.results
|
||||
|
||||
def get_item(self):
|
||||
"""Get a single Azure Azure CDN endpoint"""
|
||||
|
||||
self.log('Get properties for {0}'.format(self.name))
|
||||
|
||||
item = None
|
||||
result = []
|
||||
|
||||
try:
|
||||
item = self.cdn_client.endpoints.get(
|
||||
self.resource_group, self.profile_name, self.name)
|
||||
except ErrorResponseException:
|
||||
pass
|
||||
|
||||
if item and self.has_tags(item.tags, self.tags):
|
||||
result = [self.serialize_cdnendpoint(item)]
|
||||
|
||||
return result
|
||||
|
||||
def list_by_profile(self):
|
||||
"""Get all Azure Azure CDN endpoints within an Azure CDN profile"""
|
||||
|
||||
self.log('List all Azure CDN endpoints within an Azure CDN profile')
|
||||
|
||||
try:
|
||||
response = self.cdn_client.endpoints.list_by_profile(
|
||||
self.resource_group, self.profile_name)
|
||||
except ErrorResponseException as exc:
|
||||
self.fail('Failed to list all items - {0}'.format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.serialize_cdnendpoint(item))
|
||||
|
||||
return results
|
||||
|
||||
def serialize_cdnendpoint(self, cdnendpoint):
|
||||
'''
|
||||
Convert an Azure CDN endpoint object to a dict.
|
||||
:param cdnendpoint: Azure CDN endpoint object
|
||||
:return: dict
|
||||
'''
|
||||
result = self.serialize_obj(cdnendpoint, AZURE_OBJECT_CLASS)
|
||||
|
||||
new_result = {}
|
||||
new_result['id'] = cdnendpoint.id
|
||||
new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id']))
|
||||
new_result['profile_name'] = re.sub('\\/.*', '', re.sub('.*profiles\\/', '', result['id']))
|
||||
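# Illustrative parse (hypothetical id): for
#   /subscriptions/xxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myProfile/endpoints/myEndpoint
# the inner re.sub drops everything up to 'resourcegroups/' (or 'profiles/'), and the outer one
# drops everything after the next '/', yielding 'myResourceGroup' and 'myProfile'.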
new_result['name'] = cdnendpoint.name
|
||||
new_result['type'] = cdnendpoint.type
|
||||
new_result['location'] = cdnendpoint.location
|
||||
new_result['resource_state'] = cdnendpoint.resource_state
|
||||
new_result['provisioning_state'] = cdnendpoint.provisioning_state
|
||||
new_result['query_string_caching_behavior'] = cdnendpoint.query_string_caching_behavior
|
||||
new_result['is_compression_enabled'] = cdnendpoint.is_compression_enabled
|
||||
new_result['is_http_allowed'] = cdnendpoint.is_http_allowed
|
||||
new_result['is_https_allowed'] = cdnendpoint.is_https_allowed
|
||||
new_result['content_types_to_compress'] = cdnendpoint.content_types_to_compress
|
||||
new_result['origin_host_header'] = cdnendpoint.origin_host_header
|
||||
new_result['origin_path'] = cdnendpoint.origin_path
|
||||
new_result['origin'] = dict(
|
||||
name=cdnendpoint.origins[0].name,
|
||||
host_name=cdnendpoint.origins[0].host_name,
|
||||
http_port=cdnendpoint.origins[0].http_port,
|
||||
https_port=cdnendpoint.origins[0].https_port
|
||||
)
|
||||
new_result['tags'] = cdnendpoint.tags
|
||||
return new_result
|
||||
|
||||
|
||||
def main():
|
||||
"""Main module execution code path"""
|
||||
|
||||
AzureRMCdnEndpointInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,304 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_cdnprofile
|
||||
version_added: "2.8"
|
||||
short_description: Manage an Azure CDN profile
|
||||
description:
|
||||
- Create, update and delete an Azure CDN profile.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the CDN profile exists or will be created.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the CDN profile.
|
||||
required: true
|
||||
location:
|
||||
description:
|
||||
- Valid Azure location. Defaults to location of the resource group.
|
||||
sku:
|
||||
description:
|
||||
- The pricing tier, defines a CDN provider, feature list and rate of the CDN profile.
|
||||
- Detailed pricing can be found at U(https://azure.microsoft.com/en-us/pricing/details/cdn/).
|
||||
choices:
|
||||
- standard_verizon
|
||||
- premium_verizon
|
||||
- custom_verizon
|
||||
- standard_akamai
|
||||
- standard_chinacdn
|
||||
- standard_microsoft
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the CDN profile. Use C(present) to create or update a CDN profile and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Hai Cao (@caohai)
|
||||
- Yunge Zhu (@yungezz)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a CDN profile
|
||||
azure_rm_cdnprofile:
|
||||
resource_group: myResourceGroup
|
||||
name: myCDN
|
||||
sku: standard_akamai
|
||||
tags:
|
||||
testing: testing
|
||||
|
||||
- name: Delete the CDN profile
|
||||
azure_rm_cdnprofile:
|
||||
resource_group: myResourceGroup
|
||||
name: myCDN
|
||||
state: absent
|
||||
'''
|
||||
RETURN = '''
|
||||
id:
|
||||
description: Current state of the CDN profile.
|
||||
returned: always
|
||||
type: dict
|
||||
example:
|
||||
id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myCDN
|
||||
'''
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.mgmt.cdn.models import Profile, Sku, ErrorResponseException
|
||||
from azure.mgmt.cdn import CdnManagementClient
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def cdnprofile_to_dict(cdnprofile):
|
||||
return dict(
|
||||
id=cdnprofile.id,
|
||||
name=cdnprofile.name,
|
||||
type=cdnprofile.type,
|
||||
location=cdnprofile.location,
|
||||
sku=cdnprofile.sku.name,
|
||||
resource_state=cdnprofile.resource_state,
|
||||
provisioning_state=cdnprofile.provisioning_state,
|
||||
tags=cdnprofile.tags
|
||||
)
|
||||
|
||||
|
||||
class AzureRMCdnprofile(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
sku=dict(
|
||||
type='str',
|
||||
choices=['standard_verizon', 'premium_verizon', 'custom_verizon', 'standard_akamai', 'standard_chinacdn', 'standard_microsoft']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.location = None
|
||||
self.state = None
|
||||
self.tags = None
|
||||
self.sku = None
|
||||
|
||||
self.cdn_client = None
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['sku'])
|
||||
]
|
||||
|
||||
self.results = dict(changed=False)
|
||||
|
||||
super(AzureRMCdnprofile, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.cdn_client = self.get_cdn_client()
|
||||
|
||||
to_be_updated = False
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
response = self.get_cdnprofile()
|
||||
|
||||
if self.state == 'present':
|
||||
|
||||
if not response:
|
||||
self.log("Need to create the CDN profile")
|
||||
|
||||
if not self.check_mode:
|
||||
new_response = self.create_cdnprofile()
|
||||
self.results['id'] = new_response['id']
|
||||
|
||||
self.results['changed'] = True
|
||||
|
||||
else:
|
||||
self.log('Results : {0}'.format(response))
|
||||
update_tags, response['tags'] = self.update_tags(response['tags'])
|
||||
|
||||
if response['provisioning_state'] == "Succeeded":
|
||||
if update_tags:
|
||||
to_be_updated = True
|
||||
|
||||
if to_be_updated:
|
||||
self.log("Need to update the CDN profile")
|
||||
|
||||
if not self.check_mode:
|
||||
new_response = self.update_cdnprofile()
|
||||
self.results['id'] = new_response['id']
|
||||
|
||||
self.results['changed'] = True
|
||||
|
||||
elif self.state == 'absent':
|
||||
if not response:
|
||||
self.fail("CDN profile {0} not exists.".format(self.name))
|
||||
else:
|
||||
self.log("Need to delete the CDN profile")
|
||||
self.results['changed'] = True
|
||||
|
||||
if not self.check_mode:
|
||||
self.delete_cdnprofile()
|
||||
self.results['id'] = response['id']
|
||||
|
||||
return self.results
|
||||
|
||||
def create_cdnprofile(self):
|
||||
'''
|
||||
Creates an Azure CDN profile.
|
||||
|
||||
:return: deserialized Azure CDN profile instance state dictionary
|
||||
'''
|
||||
self.log("Creating the Azure CDN profile instance {0}".format(self.name))
|
||||
|
||||
parameters = Profile(
|
||||
location=self.location,
|
||||
sku=Sku(name=self.sku),
|
||||
tags=self.tags
|
||||
)
|
||||
|
||||
import uuid
|
||||
xid = str(uuid.uuid1())
|
||||
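# The generated id is sent as the x-ms-client-request-id header and echoed in the failure
# message below, so a failing create can be correlated with service-side logs (a convenience,
# not something the SDK requires).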
|
||||
try:
|
||||
poller = self.cdn_client.profiles.create(self.resource_group,
|
||||
self.name,
|
||||
parameters,
|
||||
custom_headers={'x-ms-client-request-id': xid}
|
||||
)
|
||||
response = self.get_poller_result(poller)
|
||||
return cdnprofile_to_dict(response)
|
||||
except ErrorResponseException as exc:
|
||||
self.log('Error attempting to create Azure CDN profile instance.')
|
||||
self.fail("Error creating Azure CDN profile instance: {0}.\n Request id: {1}".format(exc.message, xid))
|
||||
|
||||
def update_cdnprofile(self):
|
||||
'''
|
||||
Updates an Azure CDN profile.
|
||||
|
||||
:return: deserialized Azure CDN profile instance state dictionary
|
||||
'''
|
||||
self.log("Updating the Azure CDN profile instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
poller = self.cdn_client.profiles.update(self.resource_group, self.name, self.tags)
|
||||
response = self.get_poller_result(poller)
|
||||
return cdnprofile_to_dict(response)
|
||||
except ErrorResponseException as exc:
|
||||
self.log('Error attempting to update Azure CDN profile instance.')
|
||||
self.fail("Error updating Azure CDN profile instance: {0}".format(exc.message))
|
||||
|
||||
def delete_cdnprofile(self):
|
||||
'''
|
||||
Deletes the specified Azure CDN profile in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the CDN profile {0}".format(self.name))
|
||||
try:
|
||||
poller = self.cdn_client.profiles.delete(
|
||||
self.resource_group, self.name)
|
||||
self.get_poller_result(poller)
|
||||
return True
|
||||
except ErrorResponseException as e:
|
||||
self.log('Error attempting to delete the CDN profile.')
|
||||
self.fail("Error deleting the CDN profile: {0}".format(e.message))
|
||||
return False
|
||||
|
||||
def get_cdnprofile(self):
|
||||
'''
|
||||
Gets the properties of the specified CDN profile.
|
||||
|
||||
:return: deserialized CDN profile state dictionary
|
||||
'''
|
||||
self.log(
|
||||
"Checking if the CDN profile {0} is present".format(self.name))
|
||||
try:
|
||||
response = self.cdn_client.profiles.get(self.resource_group, self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("CDN profile : {0} found".format(response.name))
|
||||
return cdnprofile_to_dict(response)
|
||||
except ErrorResponseException:
|
||||
self.log('Did not find the CDN profile.')
|
||||
return False
|
||||
|
||||
def get_cdn_client(self):
|
||||
if not self.cdn_client:
|
||||
self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version='2017-04-02')
|
||||
return self.cdn_client
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMCdnprofile()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,268 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_cdnprofile_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get Azure CDN profile facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific Azure CDN profile or all CDN profiles.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Limit results to a specific CDN profile.
|
||||
resource_group:
|
||||
description:
|
||||
- The resource group to search for the desired CDN profile.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Hai Cao (@caohai)
|
||||
- Yunge Zhu (@yungezz)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for one CDN profile
|
||||
azure_rm_cdnprofile_info:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts for all CDN profiles
|
||||
azure_rm_cdnprofile_info:
|
||||
|
||||
- name: Get facts by tags
|
||||
azure_rm_cdnprofile_info:
|
||||
tags:
|
||||
- Environment:Test
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
cdnprofiles:
|
||||
description: List of CDN profiles.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the CDN profile exists.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
name:
|
||||
description:
|
||||
- Name of the CDN profile.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Testing
|
||||
location:
|
||||
description:
|
||||
- Location of the CDN profile.
|
||||
type: str
|
||||
sample: WestUS
|
||||
id:
|
||||
description:
|
||||
- ID of the CDN profile.
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myCDN
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning status of the profile.
|
||||
type: str
|
||||
sample: Succeeded
|
||||
resource_state:
|
||||
description:
|
||||
- Resource status of the profile.
|
||||
type: str
|
||||
sample: Active
|
||||
sku:
|
||||
description:
|
||||
- The pricing tier, defines a CDN provider, feature list and rate of the CDN profile.
|
||||
type: str
|
||||
sample: standard_verizon
|
||||
type:
|
||||
description:
|
||||
- The type of the CDN profile.
|
||||
type: str
|
||||
sample: Microsoft.Cdn/profiles
|
||||
tags:
|
||||
description:
|
||||
- The tags of the CDN profile.
|
||||
type: list
|
||||
sample: [
|
||||
{"foo": "bar"}
|
||||
]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.mgmt.cdn.models import ErrorResponseException
|
||||
from azure.common import AzureHttpError
|
||||
from azure.mgmt.cdn import CdnManagementClient
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
import re
|
||||
|
||||
AZURE_OBJECT_CLASS = 'profiles'
|
||||
|
||||
|
||||
class AzureRMCdnprofileInfo(AzureRMModuleBase):
|
||||
"""Utility class to get Azure CDN profile facts"""
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_args = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
cdnprofiles=[]
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.tags = None
|
||||
self.cdn_client = None
|
||||
|
||||
super(AzureRMCdnprofileInfo, self).__init__(
|
||||
derived_arg_spec=self.module_args,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_cdnprofile_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_cdnprofile_facts' module has been renamed to 'azure_rm_cdnprofile_info'", version='2.13')
|
||||
|
||||
for key in self.module_args:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.cdn_client = self.get_cdn_client()
|
||||
|
||||
if self.name and not self.resource_group:
|
||||
self.fail("Parameter error: resource group required when filtering by name.")
|
||||
|
||||
if self.name:
|
||||
self.results['cdnprofiles'] = self.get_item()
|
||||
elif self.resource_group:
|
||||
self.results['cdnprofiles'] = self.list_resource_group()
|
||||
else:
|
||||
self.results['cdnprofiles'] = self.list_all()
|
||||
|
||||
return self.results
|
||||
|
||||
def get_item(self):
|
||||
"""Get a single Azure CDN profile"""
|
||||
|
||||
self.log('Get properties for {0}'.format(self.name))
|
||||
|
||||
item = None
|
||||
result = []
|
||||
|
||||
try:
|
||||
item = self.cdn_client.profiles.get(
|
||||
self.resource_group, self.name)
|
||||
except ErrorResponseException:
|
||||
pass
|
||||
|
||||
if item and self.has_tags(item.tags, self.tags):
|
||||
result = [self.serialize_cdnprofile(item)]
|
||||
|
||||
return result
|
||||
|
||||
def list_resource_group(self):
|
||||
"""Get all Azure CDN profiles within a resource group"""
|
||||
|
||||
self.log('List all Azure CDNs within a resource group')
|
||||
|
||||
try:
|
||||
response = self.cdn_client.profiles.list_by_resource_group(
|
||||
self.resource_group)
|
||||
except AzureHttpError as exc:
|
||||
self.fail('Failed to list all items - {0}'.format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.serialize_cdnprofile(item))
|
||||
|
||||
return results
|
||||
|
||||
def list_all(self):
|
||||
"""Get all Azure CDN profiles within a subscription"""
|
||||
self.log('List all CDN profiles within a subscription')
|
||||
try:
|
||||
response = self.cdn_client.profiles.list()
|
||||
except Exception as exc:
|
||||
self.fail("Error listing all items - {0}".format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.serialize_cdnprofile(item))
|
||||
return results
|
||||
|
||||
def serialize_cdnprofile(self, cdnprofile):
|
||||
'''
|
||||
Convert a CDN profile object to a dict.
|
||||
:param cdnprofile: CDN profile object
|
||||
:return: dict
|
||||
'''
|
||||
result = self.serialize_obj(cdnprofile, AZURE_OBJECT_CLASS)
|
||||
|
||||
new_result = {}
|
||||
new_result['id'] = cdnprofile.id
|
||||
new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id']))
|
||||
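# Same id-parsing approach as serialize_cdnendpoint() in the endpoint facts module: strip
# everything up to 'resourcegroups/' and everything after the following '/'.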
new_result['name'] = cdnprofile.name
|
||||
new_result['type'] = cdnprofile.type
|
||||
new_result['location'] = cdnprofile.location
|
||||
new_result['resource_state'] = cdnprofile.resource_state
|
||||
new_result['sku'] = cdnprofile.sku.name
|
||||
new_result['provisioning_state'] = cdnprofile.provisioning_state
|
||||
new_result['tags'] = cdnprofile.tags
|
||||
return new_result
|
||||
|
||||
def get_cdn_client(self):
|
||||
if not self.cdn_client:
|
||||
self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version='2017-04-02')
|
||||
return self.cdn_client
|
||||
|
||||
|
||||
def main():
|
||||
"""Main module execution code path"""
|
||||
|
||||
AzureRMCdnprofileInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,529 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_containerinstance
|
||||
version_added: "2.5"
|
||||
short_description: Manage an Azure Container Instance
|
||||
description:
|
||||
- Create, update and delete an Azure Container Instance.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- The name of the container group.
|
||||
required: true
|
||||
os_type:
|
||||
description:
|
||||
- The OS type of containers.
|
||||
choices:
|
||||
- linux
|
||||
- windows
|
||||
default: linux
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the container instance. Use C(present) to create or update a container instance and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
ip_address:
|
||||
description:
|
||||
- The IP address type of the container group.
|
||||
- Default is C(none), which creates an instance without a public IP.
|
||||
choices:
|
||||
- public
|
||||
- none
|
||||
default: 'none'
|
||||
dns_name_label:
|
||||
description:
|
||||
- The DNS name label for the IP.
|
||||
type: str
|
||||
version_added: "2.8"
|
||||
ports:
|
||||
description:
|
||||
- List of ports exposed within the container group.
|
||||
- This option is deprecated; use I(ports) under I(containers) instead.
|
||||
type: list
|
||||
location:
|
||||
description:
|
||||
- Valid Azure location. Defaults to the location of the resource group.
|
||||
registry_login_server:
|
||||
description:
|
||||
- The container image registry login server.
|
||||
registry_username:
|
||||
description:
|
||||
- The username used to log in to the container image registry server.
|
||||
registry_password:
|
||||
description:
|
||||
- The password used to log in to the container image registry server.
|
||||
containers:
|
||||
description:
|
||||
- List of containers.
|
||||
- Required on creation.
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the container instance.
|
||||
required: true
|
||||
image:
|
||||
description:
|
||||
- The container image name.
|
||||
required: true
|
||||
memory:
|
||||
description:
|
||||
- The required memory of the containers in GB.
|
||||
type: float
|
||||
default: 1.5
|
||||
cpu:
|
||||
description:
|
||||
- The required number of CPU cores of the containers.
|
||||
type: float
|
||||
default: 1
|
||||
ports:
|
||||
description:
|
||||
- List of ports exposed within the container group.
|
||||
type: list
|
||||
environment_variables:
|
||||
description:
|
||||
- List of container environment variables.
|
||||
- When updating an existing container, all existing variables will be replaced by the new ones.
|
||||
type: dict
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Environment variable name.
|
||||
type: str
|
||||
value:
|
||||
description:
|
||||
- Environment variable value.
|
||||
type: str
|
||||
is_secure:
|
||||
description:
|
||||
- Is variable secure.
|
||||
type: bool
|
||||
version_added: "2.8"
|
||||
commands:
|
||||
description:
|
||||
- List of commands to execute within the container instance in exec form.
|
||||
- When updating an existing container, all existing commands will be replaced by the new ones.
|
||||
type: list
|
||||
version_added: "2.8"
|
||||
restart_policy:
|
||||
description:
|
||||
- Restart policy for all containers within the container group.
|
||||
type: str
|
||||
choices:
|
||||
- always
|
||||
- on_failure
|
||||
- never
|
||||
version_added: "2.8"
|
||||
force_update:
|
||||
description:
|
||||
- Force update of existing container instance. Any update will result in deletion and recreation of existing containers.
|
||||
type: bool
|
||||
default: 'no'
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create sample container group
|
||||
azure_rm_containerinstance:
|
||||
resource_group: myResourceGroup
|
||||
name: myContainerInstanceGroup
|
||||
os_type: linux
|
||||
ip_address: public
|
||||
containers:
|
||||
- name: myContainer1
|
||||
image: httpd
|
||||
memory: 1.5
|
||||
ports:
|
||||
- 80
|
||||
- 81
|
||||
'''
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance/containerGroups/aci1b6dd89
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of the container.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Creating
|
||||
ip_address:
|
||||
description:
|
||||
- Public IP Address of created container group.
|
||||
returned: if address is public
|
||||
type: str
|
||||
sample: 175.12.233.11
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def create_container_dict_from_obj(container):
|
||||
'''
|
||||
Create a dict from an instance of a Container.
|
||||
|
||||
:param rule: Container
|
||||
:return: dict
|
||||
'''
|
||||
results = dict(
|
||||
name=container.name,
|
||||
image=container.image,
|
||||
memory=container.resources.requests.memory_in_gb,
|
||||
cpu=container.resources.requests.cpu
|
||||
# command (list of str)
|
||||
# ports (list of ContainerPort)
|
||||
# environment_variables (list of EnvironmentVariable)
|
||||
# resources (ResourceRequirements)
|
||||
# volume mounts (list of VolumeMount)
|
||||
)
|
||||
|
||||
if container.instance_view is not None:
|
||||
# instance_view (ContainerPropertiesInstanceView)
|
||||
results["instance_restart_count"] = container.instance_view.restart_count
|
||||
if container.instance_view.current_state:
|
||||
results["instance_current_state"] = container.instance_view.current_state.state
|
||||
results["instance_current_start_time"] = container.instance_view.current_state.start_time
|
||||
results["instance_current_exit_code"] = container.instance_view.current_state.exit_code
|
||||
results["instance_current_finish_time"] = container.instance_view.current_state.finish_time
|
||||
results["instance_current_detail_status"] = container.instance_view.current_state.detail_status
|
||||
if container.instance_view.previous_state:
|
||||
results["instance_previous_state"] = container.instance_view.previous_state.state
|
||||
results["instance_previous_start_time"] = container.instance_view.previous_state.start_time
|
||||
results["instance_previous_exit_code"] = container.instance_view.previous_state.exit_code
|
||||
results["instance_previous_finish_time"] = container.instance_view.previous_state.finish_time
|
||||
results["instance_previous_detail_status"] = container.instance_view.previous_state.detail_status
|
||||
# events (list of ContainerEvent)
|
||||
return results
|
||||
|
||||
|
||||
env_var_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
value=dict(type='str', required=True),
|
||||
is_secure=dict(type='bool')
|
||||
)
|
||||
|
||||
|
||||
container_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
image=dict(type='str', required=True),
|
||||
memory=dict(type='float', default=1.5),
|
||||
cpu=dict(type='float', default=1),
|
||||
ports=dict(type='list', elements='int'),
|
||||
commands=dict(type='list', elements='str'),
|
||||
environment_variables=dict(type='list', elements='dict', options=env_var_spec)
|
||||
)
|
||||
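# Illustrative containers item matching container_spec (placeholder values; commands,
# ports and environment_variables are optional):
#   {'name': 'myContainer1', 'image': 'httpd', 'memory': 1.5, 'cpu': 1, 'ports': [80, 81]}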
|
||||
|
||||
class AzureRMContainerInstance(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM container instance resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
os_type=dict(
|
||||
type='str',
|
||||
default='linux',
|
||||
choices=['linux', 'windows']
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
location=dict(
|
||||
type='str',
|
||||
),
|
||||
ip_address=dict(
|
||||
type='str',
|
||||
default='none',
|
||||
choices=['public', 'none']
|
||||
),
|
||||
dns_name_label=dict(
|
||||
type='str',
|
||||
),
|
||||
ports=dict(
|
||||
type='list',
|
||||
default=[]
|
||||
),
|
||||
registry_login_server=dict(
|
||||
type='str',
|
||||
default=None
|
||||
),
|
||||
registry_username=dict(
|
||||
type='str',
|
||||
default=None
|
||||
),
|
||||
registry_password=dict(
|
||||
type='str',
|
||||
default=None,
|
||||
no_log=True
|
||||
),
|
||||
containers=dict(
|
||||
type='list',
|
||||
elements='dict',
|
||||
options=container_spec
|
||||
),
|
||||
restart_policy=dict(
|
||||
type='str',
|
||||
choices=['always', 'on_failure', 'never']
|
||||
),
|
||||
force_update=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.location = None
|
||||
self.state = None
|
||||
self.ip_address = None
|
||||
self.dns_name_label = None
|
||||
self.containers = None
|
||||
self.restart_policy = None
|
||||
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(changed=False, state=dict())
|
||||
self.cgmodels = None
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['containers'])
|
||||
]
|
||||
|
||||
super(AzureRMContainerInstance, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=required_if)
|
||||
|
||||
    def exec_module(self, **kwargs):
        """Main module execution method"""

        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        resource_group = None
        response = None
        results = dict()

        # since this client hasn't been upgraded to expose models directly off the OperationClass, fish them out
        self.cgmodels = self.containerinstance_client.container_groups.models

        resource_group = self.get_resource_group(self.resource_group)

        if not self.location:
            self.location = resource_group.location

        response = self.get_containerinstance()

        if not response:
            self.log("Container Group doesn't exist")

            if self.state == 'absent':
                self.log("Nothing to delete")
            else:
                self.force_update = True
        else:
            self.log("Container instance already exists")

            if self.state == 'absent':
                if not self.check_mode:
                    self.delete_containerinstance()
                self.results['changed'] = True
                self.log("Container instance deleted")
            elif self.state == 'present':
                self.log("Need to check if container group has to be deleted or may be updated")
                update_tags, newtags = self.update_tags(response.get('tags', dict()))
                if update_tags:
                    self.tags = newtags

                if self.force_update:
                    self.log('Deleting container instance before update')
                    if not self.check_mode:
                        self.delete_containerinstance()

        if self.state == 'present':

            self.log("Need to Create / Update the container instance")

            if self.force_update:
                self.results['changed'] = True
                if self.check_mode:
                    return self.results
                response = self.create_update_containerinstance()

            self.results['id'] = response['id']
            self.results['provisioning_state'] = response['provisioning_state']
            self.results['ip_address'] = response['ip_address']['ip'] if 'ip_address' in response else ''

            self.log("Creation / Update done")

        return self.results

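As a reading aid, a sketch of the result dictionary exec_module returns on the state=present path; the keys are the ones assigned above, the values are hypothetical.

# Hypothetical values; keys match the assignments in exec_module for state=present.
example_results = dict(
    changed=True,
    state=dict(),
    id='/subscriptions/.../resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance/containerGroups/myContainer',
    provisioning_state='Succeeded',
    ip_address='40.64.0.10',
)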
    def create_update_containerinstance(self):
        '''
        Creates or updates a container group with the specified configuration.

        :return: deserialized container instance state dictionary
        '''
        self.log("Creating / Updating the container instance {0}".format(self.name))

        registry_credentials = None

        if self.registry_login_server is not None:
            registry_credentials = [self.cgmodels.ImageRegistryCredential(server=self.registry_login_server,
                                                                          username=self.registry_username,
                                                                          password=self.registry_password)]

        ip_address = None

        containers = []
        all_ports = set([])
        for container_def in self.containers:
            name = container_def.get("name")
            image = container_def.get("image")
            memory = container_def.get("memory")
            cpu = container_def.get("cpu")
            commands = container_def.get("commands")
            ports = []
            variables = []

            port_list = container_def.get("ports")
            if port_list:
                for port in port_list:
                    all_ports.add(port)
                    ports.append(self.cgmodels.ContainerPort(port=port))

            variable_list = container_def.get("environment_variables")
            if variable_list:
                for variable in variable_list:
                    variables.append(self.cgmodels.EnvironmentVariable(name=variable.get('name'),
                                                                       value=variable.get('value') if not variable.get('is_secure') else None,
                                                                       secure_value=variable.get('value') if variable.get('is_secure') else None))

            containers.append(self.cgmodels.Container(name=name,
                                                      image=image,
                                                      resources=self.cgmodels.ResourceRequirements(
                                                          requests=self.cgmodels.ResourceRequests(memory_in_gb=memory, cpu=cpu)
                                                      ),
                                                      ports=ports,
                                                      command=commands,
                                                      environment_variables=variables))

        if self.ip_address == 'public':
            # get list of ports
            if len(all_ports) > 0:
                ports = []
                for port in all_ports:
                    ports.append(self.cgmodels.Port(port=port, protocol="TCP"))
                ip_address = self.cgmodels.IpAddress(ports=ports, dns_name_label=self.dns_name_label, type='public')

        parameters = self.cgmodels.ContainerGroup(location=self.location,
                                                  containers=containers,
                                                  image_registry_credentials=registry_credentials,
                                                  restart_policy=_snake_to_camel(self.restart_policy, True) if self.restart_policy else None,
                                                  ip_address=ip_address,
                                                  os_type=self.os_type,
                                                  volumes=None,
                                                  tags=self.tags)

        try:
            response = self.containerinstance_client.container_groups.create_or_update(resource_group_name=self.resource_group,
                                                                                       container_group_name=self.name,
                                                                                       container_group=parameters)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except CloudError as exc:
            self.fail("Error when creating ACI {0}: {1}".format(self.name, exc.message or str(exc)))

        return response.as_dict()

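The is_secure handling above decides whether a variable is sent to Azure as a plain value or as a secure_value. A standalone sketch of that branch with plain dictionaries and hypothetical data, no Azure SDK required:

# Hypothetical data; mirrors the value/secure_value split done above.
variable = {'name': 'DB_PASSWORD', 'value': 's3cret', 'is_secure': True}
env_var = dict(
    name=variable.get('name'),
    value=variable.get('value') if not variable.get('is_secure') else None,
    secure_value=variable.get('value') if variable.get('is_secure') else None,
)
assert env_var == {'name': 'DB_PASSWORD', 'value': None, 'secure_value': 's3cret'}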
    def delete_containerinstance(self):
        '''
        Deletes the specified container group instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the container instance {0}".format(self.name))
        try:
            response = self.containerinstance_client.container_groups.delete(resource_group_name=self.resource_group, container_group_name=self.name)
            return True
        except CloudError as exc:
            self.fail('Error when deleting ACI {0}: {1}'.format(self.name, exc.message or str(exc)))
            return False

    def get_containerinstance(self):
        '''
        Gets the properties of the specified container instance.

        :return: deserialized container instance state dictionary
        '''
        self.log("Checking if the container instance {0} is present".format(self.name))
        found = False
        try:
            response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group, container_group_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Container instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the container instance.')
        if found is True:
            return response.as_dict()

        return False


def main():
    """Main execution"""
    AzureRMContainerInstance()


if __name__ == '__main__':
    main()
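Related to create_update_containerinstance above: when ip_address=public, the group-level IP exposes the union of every container's ports. A minimal sketch of that aggregation with hypothetical data:

# Hypothetical container definitions; shows how the group port set is built.
containers = [
    {'name': 'web', 'ports': [80, 443]},
    {'name': 'metrics', 'ports': [443, 9100]},
]
all_ports = set()
for container_def in containers:
    all_ports.update(container_def.get('ports') or [])
print(sorted(all_ports))  # [80, 443, 9100]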
@ -1,320 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_containerinstance_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Container Instance facts
|
||||
description:
|
||||
- Get facts of Container Instance.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the container instance.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get specific Container Instance facts
|
||||
azure_rm_containerinstance_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myContainer
|
||||
|
||||
- name: List Container Instances in a specified resource group name
|
||||
azure_rm_containerinstance_info:
|
||||
resource_group: myResourceGroup
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
container_groups:
|
||||
description: A list of Container Instance dictionaries.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The resource id.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance/contain
|
||||
erGroups/myContainer"
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group where the container exists.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testrg
|
||||
name:
|
||||
description:
|
||||
- The resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: mycontainers
|
||||
location:
|
||||
description:
|
||||
- The resource location.
|
||||
returned: always
|
||||
type: str
|
||||
sample: westus
|
||||
os_type:
|
||||
description:
|
||||
- The OS type of containers.
|
||||
returned: always
|
||||
type: str
|
||||
sample: linux
|
||||
ip_address:
|
||||
description:
|
||||
- IP address of the container instance.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 173.15.18.1
|
||||
dns_name_label:
|
||||
description:
|
||||
- The Dns name label for the IP.
|
||||
returned: always
|
||||
type: str
|
||||
sample: mydomain
|
||||
ports:
|
||||
description:
|
||||
- List of ports exposed by the container instance.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ 80, 81 ]
|
||||
containers:
|
||||
description:
|
||||
- The containers within the container group.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: containers
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- The name of the container instance.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance
|
||||
/containerGroups/myContainer"
|
||||
image:
|
||||
description:
|
||||
- The container image name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance
|
||||
/containerGroups/myContainer"
|
||||
memory:
|
||||
description:
|
||||
- The required memory of the containers in GB.
|
||||
returned: always
|
||||
type: float
|
||||
sample: 1.5
|
||||
cpu:
|
||||
description:
|
||||
- The required number of CPU cores of the containers.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 1
|
||||
ports:
|
||||
description:
|
||||
- List of ports exposed within the container group.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ 80, 81 ]
|
||||
commands:
|
||||
description:
|
||||
- List of commands to execute within the container instance in exec form.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [ "pip install abc" ]
|
||||
environment_variables:
|
||||
description:
|
||||
- List of container environment variables.
|
||||
type: complex
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Environment variable name.
|
||||
type: str
|
||||
value:
|
||||
description:
|
||||
- Environment variable value.
|
||||
type: str
|
||||
tags:
|
||||
description: Tags assigned to the resource. Dictionary of string:string pairs.
|
||||
type: dict
|
||||
sample: { "tag1": "abc" }
|
||||
'''

from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _camel_to_snake

try:
    from msrestazure.azure_exceptions import CloudError
    from msrestazure.azure_operation import AzureOperationPoller
    from azure.mgmt.containerinstance import ContainerInstanceManagementClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMContainerInstanceInfo(AzureRMModuleBase):
    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str'),
            tags=dict(type='list')
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.name = None

        super(AzureRMContainerInstanceInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):

        is_old_facts = self.module._name == 'azure_rm_containerinstance_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_containerinstance_facts' module has been renamed to 'azure_rm_containerinstance_info'", version='2.13')

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if (self.name is not None):
            self.results['containerinstances'] = self.get()
        elif (self.resource_group is not None):
            self.results['containerinstances'] = self.list_by_resource_group()
        else:
            self.results['containerinstances'] = self.list_all()
        return self.results

    def get(self):
        response = None
        results = []
        try:
            response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group,
                                                                          container_group_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for Container Instances.')

        if response is not None and self.has_tags(response.tags, self.tags):
            results.append(self.format_item(response))

        return results

    def list_by_resource_group(self):
        response = None
        results = []
        try:
            response = self.containerinstance_client.container_groups.list_by_resource_group(resource_group_name=self.resource_group)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail('Could not list facts for Container Instances.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))

        return results

    def list_all(self):
        response = None
        results = []
        try:
            response = self.containerinstance_client.container_groups.list()
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail('Could not list facts for Container Instances.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))

        return results

    def format_item(self, item):
        d = item.as_dict()
        containers = d['containers']
        ports = d['ip_address']['ports'] if 'ip_address' in d else []
        resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]

        for port_index in range(len(ports)):
            ports[port_index] = ports[port_index]['port']

        for container_index in range(len(containers)):
            old_container = containers[container_index]
            new_container = {
                'name': old_container['name'],
                'image': old_container['image'],
                'memory': old_container['resources']['requests']['memory_in_gb'],
                'cpu': old_container['resources']['requests']['cpu'],
                'ports': [],
                'commands': old_container.get('command'),
                'environment_variables': old_container.get('environment_variables')
            }
            for port_index in range(len(old_container['ports'])):
                new_container['ports'].append(old_container['ports'][port_index]['port'])
            containers[container_index] = new_container

        d = {
            'id': d['id'],
            'resource_group': resource_group,
            'name': d['name'],
            'os_type': d['os_type'],
            'dns_name_label': d['ip_address'].get('dns_name_label'),
            'ip_address': d['ip_address']['ip'] if 'ip_address' in d else '',
            'ports': ports,
            'location': d['location'],
            'containers': containers,
            'restart_policy': _camel_to_snake(d.get('restart_policy')) if d.get('restart_policy') else None,
            'tags': d.get('tags', None)
        }
        return d


def main():
    AzureRMContainerInstanceInfo()


if __name__ == '__main__':
    main()
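format_item above flattens the SDK objects: port dictionaries become plain integers and each container is reduced to a small dict. A sketch of the port flattening with hypothetical data:

# Hypothetical ip_address block as returned by as_dict(); ports collapse to plain integers.
ip_address = {'ip': '40.64.0.10', 'ports': [{'port': 80, 'protocol': 'TCP'}, {'port': 443, 'protocol': 'TCP'}]}
ports = [p['port'] for p in ip_address['ports']]
assert ports == [80, 443]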
@ -1,411 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Yawei Wang, <yaweiw@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_containerregistry
|
||||
version_added: "2.5"
|
||||
short_description: Manage an Azure Container Registry
|
||||
description:
|
||||
- Create, update and delete an Azure Container Registry.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the Container Registry exists or will be created.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the Container Registry.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the container registry. Use C(present) to create or update an container registry and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
location:
|
||||
description:
|
||||
- Valid azure location. Defaults to location of the resource group.
|
||||
admin_user_enabled:
|
||||
description:
|
||||
- If enabled, you can use the registry name as username and admin user access key as password to docker login to your container registry.
|
||||
type: bool
|
||||
default: no
|
||||
sku:
|
||||
description:
|
||||
- Specifies the SKU to use. Currently can be either C(Basic), C(Standard) or C(Premium).
|
||||
default: Standard
|
||||
choices:
|
||||
- Basic
|
||||
- Standard
|
||||
- Premium
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yawei Wang (@yaweiw)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create an azure container registry
|
||||
azure_rm_containerregistry:
|
||||
name: myRegistry
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
admin_user_enabled: true
|
||||
sku: Premium
|
||||
tags:
|
||||
Release: beta1
|
||||
Environment: Production
|
||||
|
||||
- name: Remove an azure container registry
|
||||
azure_rm_containerregistry:
|
||||
name: myRegistry
|
||||
resource_group: myResourceGroup
|
||||
state: absent
|
||||
'''
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry
|
||||
name:
|
||||
description:
|
||||
- Registry name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myregistry
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
returned: always
|
||||
type: str
|
||||
sample: westus
|
||||
admin_user_enabled:
|
||||
description:
|
||||
- Is admin user enabled.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
sku:
|
||||
description:
|
||||
- The SKU name of the container registry.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Standard
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
login_server:
|
||||
description:
|
||||
- Registry login server.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myregistry.azurecr.io
|
||||
credentials:
|
||||
description:
|
||||
- Passwords defined for the registry.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
password:
|
||||
description:
|
||||
- password value.
|
||||
returned: when registry exists and C(admin_user_enabled) is set
|
||||
type: str
|
||||
sample: pass1value
|
||||
password2:
|
||||
description:
|
||||
- password2 value.
|
||||
returned: when registry exists and C(admin_user_enabled) is set
|
||||
type: str
|
||||
sample: pass2value
|
||||
tags:
|
||||
description:
|
||||
- Tags assigned to the resource. Dictionary of string:string pairs.
|
||||
returned: always
|
||||
type: dict
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.containerregistry.models import (
|
||||
Registry,
|
||||
RegistryUpdateParameters,
|
||||
StorageAccountProperties,
|
||||
Sku,
|
||||
SkuName,
|
||||
SkuTier,
|
||||
ProvisioningState,
|
||||
PasswordName,
|
||||
WebhookCreateParameters,
|
||||
WebhookUpdateParameters,
|
||||
WebhookAction,
|
||||
WebhookStatus
|
||||
)
|
||||
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
|
||||
except ImportError as exc:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def create_containerregistry_dict(registry, credentials):
    '''
    Helper method to deserialize a ContainerRegistry object to a dict.
    :param: registry: container registry object returned by the Azure REST API call
    :param: credentials: credential objects returned by the Azure REST API call
    :return: dict of the returned container registry and its credentials
    '''
    results = dict(
        id=registry.id if registry is not None else "",
        name=registry.name if registry is not None else "",
        location=registry.location if registry is not None else "",
        admin_user_enabled=registry.admin_user_enabled if registry is not None else "",
        sku=registry.sku.name if registry is not None else "",
        provisioning_state=registry.provisioning_state if registry is not None else "",
        login_server=registry.login_server if registry is not None else "",
        credentials=dict(),
        tags=registry.tags if registry is not None else ""
    )
    if credentials:
        results['credentials'] = dict(
            password=credentials.passwords[0].value,
            password2=credentials.passwords[1].value
        )

    return results


class Actions:
    NoAction, Create, Update = range(3)


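For reference, a sketch of the dictionary shape create_containerregistry_dict returns when admin credentials are available. All values are hypothetical placeholders.

# Hypothetical values only; keys match the helper above and the module's RETURN documentation.
example_registry = dict(
    id='/subscriptions/.../resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry',
    name='myRegistry',
    location='eastus',
    admin_user_enabled=True,
    sku='Premium',
    provisioning_state='Succeeded',
    login_server='myregistry.azurecr.io',
    credentials=dict(password='<password value>', password2='<password2 value>'),
    tags=dict(Release='beta1'),
)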
class AzureRMContainerRegistry(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM container registry resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
admin_user_enabled=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
sku=dict(
|
||||
type='str',
|
||||
default='Standard',
|
||||
choices=['Basic', 'Standard', 'Premium']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.location = None
|
||||
self.state = None
|
||||
self.sku = None
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(changed=False, state=dict())
|
||||
|
||||
super(AzureRMContainerRegistry, self).__init__(
|
||||
derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
resource_group = None
|
||||
response = None
|
||||
to_do = Actions.NoAction
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
# Check if the container registry instance already present in the RG
|
||||
if self.state == 'present':
|
||||
response = self.get_containerregistry()
|
||||
|
||||
if not response:
|
||||
to_do = Actions.Create
|
||||
else:
|
||||
self.log('Results : {0}'.format(response))
|
||||
self.results.update(response)
|
||||
if response['provisioning_state'] == "Succeeded":
|
||||
to_do = Actions.NoAction
|
||||
if (self.location is not None) and self.location != response['location']:
|
||||
to_do = Actions.Update
|
||||
elif (self.sku is not None) and self.sku != response['sku']:
|
||||
to_do = Actions.Update
|
||||
else:
|
||||
to_do = Actions.NoAction
|
||||
|
||||
self.log("Create / Update the container registry instance")
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.results.update(self.create_update_containerregistry(to_do))
|
||||
if to_do != Actions.NoAction:
|
||||
self.results['changed'] = True
|
||||
else:
|
||||
self.results['changed'] = False
|
||||
|
||||
self.log("Container registry instance created or updated")
|
||||
elif self.state == 'absent':
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
self.delete_containerregistry()
|
||||
self.log("Container registry instance deleted")
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_containerregistry(self, to_do):
|
||||
'''
|
||||
Creates or updates a container registry.
|
||||
|
||||
:return: deserialized container registry instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the container registry instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
if to_do != Actions.NoAction:
|
||||
if to_do == Actions.Create:
|
||||
name_status = self.containerregistry_client.registries.check_name_availability(self.name)
|
||||
if name_status.name_available:
|
||||
poller = self.containerregistry_client.registries.create(
|
||||
resource_group_name=self.resource_group,
|
||||
registry_name=self.name,
|
||||
registry=Registry(
|
||||
location=self.location,
|
||||
sku=Sku(
|
||||
name=self.sku
|
||||
),
|
||||
tags=self.tags,
|
||||
admin_user_enabled=self.admin_user_enabled
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise Exception("Invalid registry name. reason: " + name_status.reason + " message: " + name_status.message)
|
||||
else:
|
||||
registry = self.containerregistry_client.registries.get(self.resource_group, self.name)
|
||||
if registry is not None:
|
||||
poller = self.containerregistry_client.registries.update(
|
||||
resource_group_name=self.resource_group,
|
||||
registry_name=self.name,
|
||||
registry_update_parameters=RegistryUpdateParameters(
|
||||
sku=Sku(
|
||||
name=self.sku
|
||||
),
|
||||
tags=self.tags,
|
||||
admin_user_enabled=self.admin_user_enabled
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise Exception("Update registry failed as registry '" + self.name + "' doesn't exist.")
|
||||
response = self.get_poller_result(poller)
|
||||
if self.admin_user_enabled:
|
||||
credentials = self.containerregistry_client.registries.list_credentials(self.resource_group, self.name)
|
||||
else:
|
||||
self.log('Cannot perform credential operations as admin user is disabled')
|
||||
credentials = None
|
||||
else:
|
||||
response = None
|
||||
credentials = None
|
||||
except (CloudError, Exception) as exc:
|
||||
self.log('Error attempting to create / update the container registry instance.')
|
||||
self.fail("Error creating / updating the container registry instance: {0}".format(str(exc)))
|
||||
return create_containerregistry_dict(response, credentials)
|
||||
|
||||
def delete_containerregistry(self):
|
||||
'''
|
||||
Deletes the specified container registry in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the container registry instance {0}".format(self.name))
|
||||
try:
|
||||
self.containerregistry_client.registries.delete(self.resource_group, self.name).wait()
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the container registry instance.')
|
||||
self.fail("Error deleting the container registry instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_containerregistry(self):
|
||||
'''
|
||||
Gets the properties of the specified container registry.
|
||||
|
||||
:return: deserialized container registry state dictionary
|
||||
'''
|
||||
self.log("Checking if the container registry instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.containerregistry_client.registries.get(self.resource_group, self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Container registry instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
if e.error.error == 'ResourceNotFound':
|
||||
self.log('Did not find the container registry instance: {0}'.format(str(e)))
|
||||
else:
|
||||
self.fail('Error while trying to get container registry instance: {0}'.format(str(e)))
|
||||
response = None
|
||||
if found is True and self.admin_user_enabled is True:
|
||||
try:
|
||||
credentials = self.containerregistry_client.registries.list_credentials(self.resource_group, self.name)
|
||||
except CloudError as e:
|
||||
self.fail('List registry credentials failed: {0}'.format(str(e)))
|
||||
credentials = None
|
||||
elif found is True and self.admin_user_enabled is False:
|
||||
credentials = None
|
||||
else:
|
||||
return None
|
||||
return create_containerregistry_dict(response, credentials)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMContainerRegistry()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,283 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_containerregistry_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Container Registry facts
|
||||
description:
|
||||
- Get facts for Container Registry.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group to which the container registry belongs.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the container registry.
|
||||
retrieve_credentials:
|
||||
description:
|
||||
- Retrieve credentials for container registry.
|
||||
type: bool
|
||||
default: no
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of Registry
|
||||
azure_rm_containerregistry_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myRegistry
|
||||
|
||||
- name: List instances of Registry
|
||||
azure_rm_containerregistry_info:
|
||||
resource_group: myResourceGroup
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
registries:
|
||||
description:
|
||||
- A list of dictionaries containing facts for registries.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registr
|
||||
ies/myRegistry"
|
||||
name:
|
||||
description:
|
||||
- The name of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myRegistry
|
||||
location:
|
||||
description:
|
||||
- The location of the resource. This cannot be changed after the resource is created.
|
||||
returned: always
|
||||
type: str
|
||||
sample: westus
|
||||
admin_user_enabled:
|
||||
description:
|
||||
- Is admin user enabled.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: yes
|
||||
sku:
|
||||
description:
|
||||
- The SKU name of the container registry.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Premium
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of the container registry.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
login_server:
|
||||
description:
|
||||
- Login server for the registry.
|
||||
returned: always
|
||||
type: str
|
||||
sample: acrd08521b.azurecr.io
|
||||
credentials:
|
||||
description:
|
||||
- Credentials, fields will be empty if admin user is not enabled for ACR.
|
||||
returned: when C(retrieve_credentials) is set and C(admin_user_enabled) is set on ACR
|
||||
type: complex
|
||||
contains:
|
||||
username:
|
||||
description:
|
||||
- The user name for container registry.
|
||||
returned: when registry exists and C(admin_user_enabled) is set
|
||||
type: str
|
||||
sample: zim
|
||||
password:
|
||||
description:
|
||||
- password value.
|
||||
returned: when registry exists and C(admin_user_enabled) is set
|
||||
type: str
|
||||
sample: pass1value
|
||||
password2:
|
||||
description:
|
||||
- password2 value.
|
||||
returned: when registry exists and C(admin_user_enabled) is set
|
||||
type: str
|
||||
sample: pass2value
|
||||
tags:
|
||||
description:
|
||||
- Tags assigned to the resource. Dictionary of string:string pairs.
|
||||
type: dict
|
||||
sample: { "tag1": "abc" }
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMContainerRegistryInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
),
|
||||
retrieve_credentials=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.retrieve_credentials = False
|
||||
|
||||
super(AzureRMContainerRegistryInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_containerregistry_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_containerregistry_facts' module has been renamed to 'azure_rm_containerregistry_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name:
|
||||
self.results['registries'] = self.get()
|
||||
elif self.resource_group:
|
||||
self.results['registries'] = self.list_by_resource_group()
|
||||
else:
|
||||
self.results['registries'] = self.list_all()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.containerregistry_client.registries.get(resource_group_name=self.resource_group,
|
||||
registry_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Registries.')
|
||||
|
||||
if response is not None:
|
||||
if self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_item(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_all(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.containerregistry_client.registries.list()
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for Registries.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_item(item))
|
||||
return results
|
||||
|
||||
def list_by_resource_group(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.containerregistry_client.registries.list_by_resource_group(resource_group_name=self.resource_group)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for Registries.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_item(item))
|
||||
return results
|
||||
|
||||
def format_item(self, item):
|
||||
d = item.as_dict()
|
||||
resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
|
||||
name = d['name']
|
||||
credentials = {}
|
||||
admin_user_enabled = d['admin_user_enabled']
|
||||
|
||||
if self.retrieve_credentials and admin_user_enabled:
|
||||
credentials = self.containerregistry_client.registries.list_credentials(resource_group, name).as_dict()
|
||||
for index in range(len(credentials['passwords'])):
|
||||
password = credentials['passwords'][index]
|
||||
if password['name'] == 'password':
|
||||
credentials['password'] = password['value']
|
||||
elif password['name'] == 'password2':
|
||||
credentials['password2'] = password['value']
|
||||
credentials.pop('passwords')
|
||||
|
||||
d = {
|
||||
'resource_group': resource_group,
|
||||
'name': d['name'],
|
||||
'location': d['location'],
|
||||
'admin_user_enabled': admin_user_enabled,
|
||||
'sku': d['sku']['tier'].lower(),
|
||||
'provisioning_state': d['provisioning_state'],
|
||||
'login_server': d['login_server'],
|
||||
'id': d['id'],
|
||||
'tags': d.get('tags', None),
|
||||
'credentials': credentials
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMContainerRegistryInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,587 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_cosmosdbaccount
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure Database Account instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure Database Account.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of an Azure resource group.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- Cosmos DB database account name.
|
||||
required: True
|
||||
location:
|
||||
description:
|
||||
- The location of the resource group to which the resource belongs.
|
||||
- Required when I(state=present).
|
||||
kind:
|
||||
description:
|
||||
- Indicates the type of database account. This can only be set at database account creation.
|
||||
choices:
|
||||
- 'global_document_db'
|
||||
- 'mongo_db'
|
||||
- 'parse'
|
||||
consistency_policy:
|
||||
description:
|
||||
- The consistency policy for the Cosmos DB account.
|
||||
suboptions:
|
||||
default_consistency_level:
|
||||
description:
|
||||
- The default consistency level and configuration settings of the Cosmos DB account.
|
||||
- Required when I(state=present).
|
||||
choices:
|
||||
- 'eventual'
|
||||
- 'session'
|
||||
- 'bounded_staleness'
|
||||
- 'strong'
|
||||
- 'consistent_prefix'
|
||||
max_staleness_prefix:
|
||||
description:
|
||||
- When used with the Bounded Staleness consistency level, this value represents the number of stale requests tolerated.
|
||||
- Accepted range for this value is 1 - 2,147,483,647. Required when I(default_consistency_policy=bounded_staleness).
|
||||
type: int
|
||||
max_interval_in_seconds:
|
||||
description:
|
||||
- When used with the Bounded Staleness consistency level, this value represents the time amount of staleness (in seconds) tolerated.
|
||||
- Accepted range for this value is 5 - 86400. Required when I(default_consistency_policy=bounded_staleness).
|
||||
type: int
|
||||
geo_rep_locations:
|
||||
description:
|
||||
- An array that contains the georeplication locations enabled for the Cosmos DB account.
|
||||
- Required when I(state=present).
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the region.
|
||||
failover_priority:
|
||||
description:
|
||||
- The failover priority of the region. A failover priority of 0 indicates a write region.
|
||||
- The maximum value for a failover priority = (total number of regions - 1).
|
||||
- Failover priority values must be unique for each of the regions in which the database account exists.
|
||||
type: int
|
||||
database_account_offer_type:
|
||||
description:
|
||||
- Database account offer type, for example I(Standard)
|
||||
- Required when I(state=present).
|
||||
ip_range_filter:
|
||||
description:
|
||||
- Cosmos DB Firewall support. This value specifies the set of IP addresses or IP address ranges.
|
||||
- In CIDR form to be included as the allowed list of client IPs for a given database account.
|
||||
- IP addresses/ranges must be comma separated and must not contain any spaces.
|
||||
is_virtual_network_filter_enabled:
|
||||
description:
|
||||
- Flag to indicate whether to enable/disable Virtual Network ACL rules.
|
||||
type: bool
|
||||
enable_automatic_failover:
|
||||
description:
|
||||
- Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage.
|
||||
- Automatic failover will result in a new write region for the account and is chosen based on the failover priorities configured for the account.
|
||||
type: bool
|
||||
enable_cassandra:
|
||||
description:
|
||||
- Enable Cassandra.
|
||||
type: bool
|
||||
enable_table:
|
||||
description:
|
||||
- Enable Table.
|
||||
type: bool
|
||||
enable_gremlin:
|
||||
description:
|
||||
- Enable Gremlin.
|
||||
type: bool
|
||||
virtual_network_rules:
|
||||
description:
|
||||
- List of Virtual Network ACL rules configured for the Cosmos DB account.
|
||||
type: list
|
||||
suboptions:
|
||||
subnet:
|
||||
description:
|
||||
- It can be a string containing resource id of a subnet.
|
||||
- It can be a dictionary containing 'resource_group', 'virtual_network_name' and 'subnet_name'
|
||||
ignore_missing_vnet_service_endpoint:
|
||||
description:
|
||||
- Create Cosmos DB account without existing virtual network service endpoint.
|
||||
type: bool
|
||||
|
||||
enable_multiple_write_locations:
|
||||
description:
|
||||
- Enables the account to write in multiple locations
|
||||
type: bool
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Database Account.
|
||||
- Use C(present) to create or update an Database Account and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create Cosmos DB Account - min
|
||||
azure_rm_cosmosdbaccount:
|
||||
resource_group: myResourceGroup
|
||||
name: myDatabaseAccount
|
||||
location: westus
|
||||
geo_rep_locations:
|
||||
- name: southcentralus
|
||||
failover_priority: 0
|
||||
database_account_offer_type: Standard
|
||||
|
||||
- name: Create Cosmos DB Account - max
|
||||
azure_rm_cosmosdbaccount:
|
||||
resource_group: myResourceGroup
|
||||
name: myDatabaseAccount
|
||||
location: westus
|
||||
kind: mongo_db
|
||||
geo_rep_locations:
|
||||
- name: southcentralus
|
||||
failover_priority: 0
|
||||
database_account_offer_type: Standard
|
||||
ip_range_filter: 10.10.10.10
|
||||
enable_multiple_write_locations: yes
|
||||
virtual_network_rules:
|
||||
- subnet: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVi
|
||||
rtualNetwork/subnets/mySubnet"
|
||||
consistency_policy:
|
||||
default_consistency_level: bounded_staleness
|
||||
max_staleness_prefix: 10
|
||||
max_interval_in_seconds: 1000
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The unique resource identifier of the database account.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccounts/myData
|
||||
baseAccount"
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.cosmosdb import CosmosDB
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMCosmosDBAccount(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Database Account resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
kind=dict(
|
||||
type='str',
|
||||
choices=['global_document_db',
|
||||
'mongo_db',
|
||||
'parse']
|
||||
),
|
||||
consistency_policy=dict(
|
||||
type='dict',
|
||||
options=dict(
|
||||
default_consistency_level=dict(
|
||||
type='str',
|
||||
choices=['eventual',
|
||||
'session',
|
||||
'bounded_staleness',
|
||||
'strong',
|
||||
'consistent_prefix']
|
||||
),
|
||||
max_staleness_prefix=dict(
|
||||
type='int'
|
||||
),
|
||||
max_interval_in_seconds=dict(
|
||||
type='int'
|
||||
)
|
||||
)
|
||||
),
|
||||
geo_rep_locations=dict(
|
||||
type='list',
|
||||
options=dict(
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
failover_priority=dict(
|
||||
type='int',
|
||||
required=True
|
||||
)
|
||||
)
|
||||
),
|
||||
database_account_offer_type=dict(
|
||||
type='str'
|
||||
),
|
||||
ip_range_filter=dict(
|
||||
type='str'
|
||||
),
|
||||
is_virtual_network_filter_enabled=dict(
|
||||
type='bool'
|
||||
),
|
||||
enable_automatic_failover=dict(
|
||||
type='bool'
|
||||
),
|
||||
enable_cassandra=dict(
|
||||
type='bool'
|
||||
),
|
||||
enable_table=dict(
|
||||
type='bool'
|
||||
),
|
||||
enable_gremlin=dict(
|
||||
type='bool'
|
||||
),
|
||||
virtual_network_rules=dict(
|
||||
type='list',
|
||||
options=dict(
|
||||
id=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
ignore_missing_vnet_service_endpoint=dict(
|
||||
type='bool'
|
||||
)
|
||||
)
|
||||
),
|
||||
enable_multiple_write_locations=dict(
|
||||
type='bool'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.parameters = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMCosmosDBAccount, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.parameters[key] = kwargs[key]
|
||||
|
||||
kind = self.parameters.get('kind')
|
||||
if kind == 'global_document_db':
|
||||
self.parameters['kind'] = 'GlobalDocumentDB'
|
||||
elif kind == 'mongo_db':
|
||||
self.parameters['kind'] = 'MongoDB'
|
||||
elif kind == 'parse':
|
||||
self.parameters['kind'] = 'Parse'
|
||||
|
||||
dict_camelize(self.parameters, ['consistency_policy', 'default_consistency_level'], True)
|
||||
dict_rename(self.parameters, ['geo_rep_locations', 'name'], 'location_name')
|
||||
dict_rename(self.parameters, ['geo_rep_locations'], 'locations')
|
||||
self.parameters['capabilities'] = []
|
||||
if self.parameters.pop('enable_cassandra', False):
|
||||
self.parameters['capabilities'].append({'name': 'EnableCassandra'})
|
||||
if self.parameters.pop('enable_table', False):
|
||||
self.parameters['capabilities'].append({'name': 'EnableTable'})
|
||||
if self.parameters.pop('enable_gremlin', False):
|
||||
self.parameters['capabilities'].append({'name': 'EnableGremlin'})
|
||||
|
||||
for rule in self.parameters.get('virtual_network_rules', []):
|
||||
subnet = rule.pop('subnet')
|
||||
if isinstance(subnet, dict):
|
||||
virtual_network_name = subnet.get('virtual_network_name')
|
||||
subnet_name = subnet.get('subnet_name')
|
||||
resource_group_name = subnet.get('resource_group', self.resource_group)
|
||||
template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}"
|
||||
subnet = template.format(self.subscription_id, resource_group_name, virtual_network_name, subnet_name)
|
||||
rule['id'] = subnet
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(CosmosDB,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if "location" not in self.parameters:
|
||||
self.parameters["location"] = resource_group.location
|
||||
|
||||
old_response = self.get_databaseaccount()
|
||||
|
||||
if not old_response:
|
||||
self.log("Database Account instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Database Account instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
old_response['locations'] = old_response['failover_policies']
|
||||
if not default_compare(self.parameters, old_response, '', self.results):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Database Account instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_databaseaccount()
|
||||
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Database Account instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_databaseaccount()
|
||||
else:
|
||||
self.log("Database Account instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({'id': response.get('id', None)})
|
||||
return self.results
|
||||
|
||||
def create_update_databaseaccount(self):
|
||||
'''
|
||||
Creates or updates Database Account with the specified configuration.
|
||||
|
||||
:return: deserialized Database Account instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Database Account instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.database_accounts.create_or_update(resource_group_name=self.resource_group,
|
||||
account_name=self.name,
|
||||
create_update_parameters=self.parameters)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Database Account instance.')
|
||||
self.fail("Error creating the Database Account instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_databaseaccount(self):
|
||||
'''
|
||||
Deletes specified Database Account instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Database Account instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.database_accounts.delete(resource_group_name=self.resource_group,
|
||||
account_name=self.name)
|
||||
|
||||
# This currently doesn't work as there is a bug in SDK / Service
|
||||
# if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
# response = self.get_poller_result(response)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Database Account instance.')
|
||||
self.fail("Error deleting the Database Account instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_databaseaccount(self):
|
||||
'''
|
||||
Gets the properties of the specified Database Account.
|
||||
|
||||
:return: deserialized Database Account instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Database Account instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.database_accounts.get(resource_group_name=self.resource_group,
|
||||
account_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Database Account instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Database Account instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def default_compare(new, old, path, result):
    if new is None:
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        elif len(old) == 0:
            return True
        elif isinstance(old[0], dict):
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, ''))
            old = sorted(old, key=lambda x: x.get(key, ''))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location' or path.endswith('location_name'):
            new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
        if new == old:
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False


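A quick usage sketch for default_compare, assuming the function above is in scope; the data is hypothetical. Keys present only in the old (existing) state are ignored, and location strings are normalized before comparison:

# Hypothetical desired vs. existing state; extra keys on the old side do not trigger a change.
result = {}
new = {'location': 'West US',
       'locations': [{'location_name': 'southcentralus', 'failover_priority': 0}]}
old = {'location': 'westus',
       'locations': [{'location_name': 'southcentralus', 'failover_priority': 0}],
       'kind': 'GlobalDocumentDB'}
assert default_compare(new, old, '', result) is True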
def dict_camelize(d, path, camelize_first):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_camelize(d[i], path, camelize_first)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.get(path[0], None)
|
||||
if old_value is not None:
|
||||
d[path[0]] = _snake_to_camel(old_value, camelize_first)
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_camelize(sd, path[1:], camelize_first)
|
||||
|
||||
|
||||
def dict_upper(d, path):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_upper(d[i], path)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.get(path[0], None)
|
||||
if old_value is not None:
|
||||
d[path[0]] = old_value.upper()
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_upper(sd, path[1:])
|
||||
|
||||
|
||||
def dict_rename(d, path, new_name):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_rename(d[i], path, new_name)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.pop(path[0], None)
|
||||
if old_value is not None:
|
||||
d[new_name] = old_value
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_rename(sd, path[1:], new_name)
|
||||
|
||||
|
||||
def dict_expand(d, path, outer_dict_name):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_expand(d[i], path, outer_dict_name)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.pop(path[0], None)
|
||||
if old_value is not None:
|
||||
d[outer_dict_name] = d.get(outer_dict_name, {})
|
||||
d[outer_dict_name][path[0]] = old_value
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_expand(sd, path[1:], outer_dict_name)
|
||||
|
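# Illustrative sketch (not part of the original module): the dict_* helpers above are
# used to reshape a snake_case request body into the form expected by the Azure API.
# The field names and values here are made up for demonstration.
body = {
    'default_consistency_level': 'bounded_staleness',
    'location_name': 'East US',
    'failover_priority': 0,
}
dict_camelize(body, ['default_consistency_level'], True)  # -> 'BoundedStaleness'
dict_rename(body, ['location_name'], 'name')              # key renamed to 'name'
dict_expand(body, ['failover_priority'], 'properties')    # value nested under 'properties'
# body == {'default_consistency_level': 'BoundedStaleness',
#          'name': 'East US',
#          'properties': {'failover_priority': 0}}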
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMCosmosDBAccount()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,520 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_cosmosdbaccount_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Cosmos DB Account facts
|
||||
description:
|
||||
- Get facts of Azure Cosmos DB Account.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of an Azure resource group.
|
||||
name:
|
||||
description:
|
||||
- Cosmos DB database account name.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
retrieve_keys:
|
||||
description:
|
||||
- Retrieve keys and connection strings.
|
||||
type: str
|
||||
choices:
|
||||
- all
|
||||
- readonly
|
||||
retrieve_connection_strings:
|
||||
description:
|
||||
- Retrieve connection strings.
|
||||
type: bool
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of Database Account
|
||||
azure_rm_cosmosdbaccount_info:
|
||||
resource_group: myResourceGroup
|
||||
name: testaccount
|
||||
|
||||
- name: List instances of Database Account
|
||||
azure_rm_cosmosdbaccount_info:
|
||||
resource_group: myResourceGroup
|
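# Hypothetical example (not part of the original documentation): fetch a single
# account together with its read-only keys and connection strings.
- name: Get Database Account including read-only keys and connection strings
  azure_rm_cosmosdbaccount_info:
    resource_group: myResourceGroup
    name: testaccount
    retrieve_keys: readonly
    retrieve_connection_strings: true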
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
accounts:
|
||||
description: A list of dictionaries containing facts for Database Account.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The unique resource identifier of the database account.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccount
|
||||
s/testaccount"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of an Azure resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
name:
|
||||
description:
|
||||
- The name of the database account.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testaccount
|
||||
location:
|
||||
description:
|
||||
- The location of the resource group to which the resource belongs.
|
||||
returned: always
|
||||
type: str
|
||||
sample: westus
|
||||
kind:
|
||||
description:
|
||||
- Indicates the type of database account.
|
||||
returned: always
|
||||
type: str
|
||||
sample: global_document_db
|
||||
consistency_policy:
|
||||
description:
|
||||
- Consistency policy.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
default_consistency_level:
|
||||
description:
|
||||
- Default consistency level.
|
||||
returned: always
|
||||
type: str
|
||||
sample: session
|
||||
max_interval_in_seconds:
|
||||
description:
|
||||
- Maximum interval in seconds.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 5
|
||||
max_staleness_prefix:
|
||||
description:
|
||||
- Maximum staleness prefix.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 100
|
||||
failover_policies:
|
||||
description:
|
||||
- The list of new failover policies for the failover priority change.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Location name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: eastus
|
||||
failover_priority:
|
||||
description:
|
||||
- Failover priority.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 0
|
||||
id:
|
||||
description:
|
||||
- Read location ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testaccount-eastus
|
||||
read_locations:
|
||||
description:
|
||||
- Read locations.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Location name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: eastus
|
||||
failover_priority:
|
||||
description:
|
||||
- Failover priority.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 0
|
||||
id:
|
||||
description:
|
||||
- Read location ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testaccount-eastus
|
||||
document_endpoint:
|
||||
description:
|
||||
- Document endpoint.
|
||||
returned: always
|
||||
type: str
|
||||
sample: https://testaccount-eastus.documents.azure.com:443/
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
write_locations:
|
||||
description:
|
||||
- Write locations.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Location name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: eastus
|
||||
failover_priority:
|
||||
description:
|
||||
- Failover priority.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 0
|
||||
id:
|
||||
description:
|
||||
- Read location ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testaccount-eastus
|
||||
document_endpoint:
|
||||
description:
|
||||
- Document endpoint.
|
||||
returned: always
|
||||
type: str
|
||||
sample: https://testaccount-eastus.documents.azure.com:443/
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
database_account_offer_type:
|
||||
description:
|
||||
- Offer type.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Standard
|
||||
ip_range_filter:
|
||||
description:
|
||||
- The IP range filter as a comma-separated list of allowed client IP addresses or CIDR ranges.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 10.10.10.10
|
||||
is_virtual_network_filter_enabled:
|
||||
description:
|
||||
- Enable virtual network filter.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
enable_automatic_failover:
|
||||
description:
|
||||
- Enable automatic failover.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
enable_cassandra:
|
||||
description:
|
||||
- Enable Cassandra.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
enable_table:
|
||||
description:
|
||||
- Enable Table.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
enable_gremlin:
|
||||
description:
|
||||
- Enable Gremlin.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
virtual_network_rules:
|
||||
description:
|
||||
- List of Virtual Network ACL rules configured for the Cosmos DB account.
|
||||
type: list
|
||||
contains:
|
||||
subnet:
|
||||
description:
|
||||
- Resource id of a subnet.
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNet
|
||||
works/testvnet/subnets/testsubnet1"
|
||||
ignore_missing_vnet_service_endpoint:
|
||||
description:
|
||||
- Create Cosmos DB account without existing virtual network service endpoint.
|
||||
type: bool
|
||||
enable_multiple_write_locations:
|
||||
description:
|
||||
- Enable multiple write locations.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
document_endpoint:
|
||||
description:
|
||||
- Document endpoint.
|
||||
returned: always
|
||||
type: str
|
||||
sample: https://testaccount.documents.azure.com:443/
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of Cosmos DB.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
primary_master_key:
|
||||
description:
|
||||
- Primary master key.
|
||||
returned: when requested
|
||||
type: str
|
||||
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
|
||||
secondary_master_key:
|
||||
description:
|
||||
- Secondary master key.
|
||||
returned: when requested
|
||||
type: str
|
||||
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
|
||||
primary_readonly_master_key:
|
||||
description:
|
||||
- Primary readonly master key.
|
||||
returned: when requested
|
||||
type: str
|
||||
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
|
||||
secondary_readonly_master_key:
|
||||
description:
|
||||
- Secondary readonly master key.
|
||||
returned: when requested
|
||||
type: str
|
||||
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
|
||||
connection_strings:
|
||||
description:
|
||||
- List of connection strings.
|
||||
type: list
|
||||
returned: when requested
|
||||
contains:
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "AccountEndpoint=https://testaccount.documents.azure.com:443/;AccountKey=fSEjathnk6ZeBTrXkud9j5kfhtSEQq3dpJxJga76h9BZkK2BJJrDzSO6DDn6yKads017OZBZ1YZWyq1cW4iuvA=="
|
||||
description:
|
||||
description:
|
||||
- Description of the connection string.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "AccountEndpoint=https://testaccount.documents.azure.com:443/;AccountKey=fSEjathnk6ZeBTrXkud9j5kfhtSEQ
|
||||
q3dpJxJga76h9BZkK2BJJrDzSO6DDn6yKads017OZBZ1YZWyq1cW4iuvA=="
|
||||
tags:
|
||||
description:
|
||||
- Tags assigned to the resource. Dictionary of "string":"string" pairs.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { "tag1":"abc" }
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _camel_to_snake
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.cosmosdb import CosmosDB
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMCosmosDBAccountInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str'
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
),
|
||||
retrieve_keys=dict(
|
||||
type='str',
|
||||
choices=['all', 'readonly']
|
||||
),
|
||||
retrieve_connection_strings=dict(
|
||||
type='bool'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
self.retrieve_keys = None
|
||||
self.retrieve_connection_strings = None
|
||||
|
||||
super(AzureRMCosmosDBAccountInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_cosmosdbaccount_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_cosmosdbaccount_facts' module has been renamed to 'azure_rm_cosmosdbaccount_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(CosmosDB,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name is not None:
|
||||
self.results['accounts'] = self.get()
|
||||
elif self.resource_group is not None:
|
||||
self.results['accounts'] = self.list_by_resource_group()
|
||||
else:
|
||||
self.results['accounts'] = self.list_all()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.database_accounts.get(resource_group_name=self.resource_group,
|
||||
account_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Database Account.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_resource_group(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.database_accounts.list_by_resource_group(resource_group_name=self.resource_group)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Database Account.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def list_all(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.database_accounts.list()
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Database Account.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'id': d.get('id'),
|
||||
'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
|
||||
'name': d.get('name', None),
|
||||
'location': d.get('location', '').replace(' ', '').lower(),
|
||||
'kind': _camel_to_snake(d.get('kind', None)),
|
||||
'consistency_policy': {'default_consistency_level': _camel_to_snake(d['consistency_policy']['default_consistency_level']),
|
||||
'max_interval_in_seconds': d['consistency_policy']['max_interval_in_seconds'],
|
||||
'max_staleness_prefix': d['consistency_policy']['max_staleness_prefix']},
|
||||
'failover_policies': [{'name': fp['location_name'].replace(' ', '').lower(),
|
||||
'failover_priority': fp['failover_priority'],
|
||||
'id': fp['id']} for fp in d['failover_policies']],
|
||||
'read_locations': [{'name': rl['location_name'].replace(' ', '').lower(),
|
||||
'failover_priority': rl['failover_priority'],
|
||||
'id': rl['id'],
|
||||
'document_endpoint': rl['document_endpoint'],
|
||||
'provisioning_state': rl['provisioning_state']} for rl in d['read_locations']],
|
||||
'write_locations': [{'name': wl['location_name'].replace(' ', '').lower(),
|
||||
'failover_priority': wl['failover_priority'],
|
||||
'id': wl['id'],
|
||||
'document_endpoint': wl['document_endpoint'],
|
||||
'provisioning_state': wl['provisioning_state']} for wl in d['write_locations']],
|
||||
'database_account_offer_type': d.get('database_account_offer_type'),
|
||||
'ip_range_filter': d['ip_range_filter'],
|
||||
'is_virtual_network_filter_enabled': d.get('is_virtual_network_filter_enabled'),
|
||||
'enable_automatic_failover': d.get('enable_automatic_failover'),
|
||||
'enable_cassandra': 'EnableCassandra' in [x.get('name') for x in d.get('capabilities', [])],
|
||||
'enable_table': 'EnableTable' in [x.get('name') for x in d.get('capabilities', [])],
|
||||
'enable_gremlin': 'EnableGremlin' in [x.get('name') for x in d.get('capabilities', [])],
|
||||
'virtual_network_rules': d.get('virtual_network_rules'),
|
||||
'enable_multiple_write_locations': d.get('enable_multiple_write_locations'),
|
||||
'document_endpoint': d.get('document_endpoint'),
|
||||
'provisioning_state': d.get('provisioning_state'),
|
||||
'tags': d.get('tags', None)
|
||||
}
|
||||
|
||||
if self.retrieve_keys == 'all':
|
||||
keys = self.mgmt_client.database_accounts.list_keys(resource_group_name=self.resource_group,
|
||||
account_name=self.name)
|
||||
d['primary_master_key'] = keys.primary_master_key
|
||||
d['secondary_master_key'] = keys.secondary_master_key
|
||||
d['primary_readonly_master_key'] = keys.primary_readonly_master_key
|
||||
d['secondary_readonly_master_key'] = keys.secondary_readonly_master_key
|
||||
elif self.retrieve_keys == 'readonly':
|
||||
keys = self.mgmt_client.database_accounts.get_read_only_keys(resource_group_name=self.resource_group,
|
||||
account_name=self.name)
|
||||
d['primary_readonly_master_key'] = keys.primary_readonly_master_key
|
||||
d['secondary_readonly_master_key'] = keys.secondary_readonly_master_key
|
||||
if self.retrieve_connection_strings:
|
||||
connection_strings = self.mgmt_client.database_accounts.list_connection_strings(resource_group_name=self.resource_group,
|
||||
account_name=self.name)
|
||||
d['connection_strings'] = connection_strings.as_dict()
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMCosmosDBAccountInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,702 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_deployment
|
||||
|
||||
short_description: Create or destroy Azure Resource Manager template deployments
|
||||
|
||||
version_added: "2.1"
|
||||
|
||||
description:
|
||||
- Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
|
||||
- You can find some quick start templates in GitHub here U(https://github.com/azure/azure-quickstart-templates).
|
||||
- For more information on Azure Resource Manager templates see U(https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/).
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The resource group name to use or create to host the deployed template.
|
||||
required: true
|
||||
aliases:
|
||||
- resource_group_name
|
||||
name:
|
||||
description:
|
||||
- The name of the deployment to be tracked in the resource group deployment history.
|
||||
- Re-using a deployment name will overwrite the previous value in the resource group's deployment history.
|
||||
default: ansible-arm
|
||||
aliases:
|
||||
- deployment_name
|
||||
location:
|
||||
description:
|
||||
- The geo-location in which the resource group will be located.
|
||||
default: westus
|
||||
deployment_mode:
|
||||
description:
|
||||
- In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
|
||||
- In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted.
|
||||
default: incremental
|
||||
choices:
|
||||
- complete
|
||||
- incremental
|
||||
template:
|
||||
description:
|
||||
- A hash containing the template body inline. This parameter is mutually exclusive with I(template_link).
|
||||
- Either I(template) or I(template_link) is required if I(state=present).
|
||||
type: dict
|
||||
template_link:
|
||||
description:
|
||||
- URI of the file containing the template body. This parameter is mutually exclusive with I(template).
|
||||
- Either I(template) or I(template_link) is required if I(state=present).
|
||||
parameters:
|
||||
description:
|
||||
- A hash of all the required template variables for the deployment template. This parameter is mutually exclusive with I(parameters_link).
|
||||
- Either I(parameters_link) or I(parameters) is required if I(state=present).
|
||||
type: dict
|
||||
parameters_link:
|
||||
description:
|
||||
- URI of the file containing the parameters body. This parameter is mutually exclusive with I(parameters).
|
||||
- Either I(parameters_link) or I(parameters) is required if I(state=present).
|
||||
wait_for_deployment_completion:
|
||||
description:
|
||||
- Whether or not to block until the deployment has completed.
|
||||
type: bool
|
||||
default: 'yes'
|
||||
wait_for_deployment_polling_period:
|
||||
description:
|
||||
- Time (in seconds) to wait between polls when waiting for deployment completion.
|
||||
default: 10
|
||||
state:
|
||||
description:
|
||||
- If I(state=present), template will be created.
|
||||
- If I(state=present) and deployment exists, it will be updated.
|
||||
- If I(state=absent), stack will be removed.
|
||||
default: present
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- David Justice (@devigned)
|
||||
- Laurent Mazuel (@lmazuel)
|
||||
- Andre Price (@obsoleted)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Destroy a template deployment
|
||||
- name: Destroy Azure Deploy
|
||||
azure_rm_deployment:
|
||||
resource_group: myResourceGroup
|
||||
name: myDeployment
|
||||
state: absent
|
||||
|
||||
# Create or update a template deployment based on uris using parameter and template links
|
||||
- name: Create Azure Deploy
|
||||
azure_rm_deployment:
|
||||
resource_group: myResourceGroup
|
||||
name: myDeployment
|
||||
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
|
||||
parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
|
||||
|
||||
# Create or update a template deployment based on a uri to the template and parameters specified inline.
|
||||
# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then
|
||||
# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH.
|
||||
---
|
||||
- name: Create Azure Deploy
|
||||
azure_rm_deployment:
|
||||
resource_group: myResourceGroup
|
||||
name: myDeployment
|
||||
parameters:
|
||||
newStorageAccountName:
|
||||
value: devopsclestorage1
|
||||
adminUsername:
|
||||
value: devopscle
|
||||
dnsNameForPublicIP:
|
||||
value: devopscleazure
|
||||
location:
|
||||
value: West US
|
||||
vmSize:
|
||||
value: Standard_A2
|
||||
vmName:
|
||||
value: ansibleSshVm
|
||||
sshKeyData:
|
||||
value: YOUR_SSH_PUBLIC_KEY
|
||||
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
|
||||
register: azure
|
||||
- name: Add new instance to host group
|
||||
add_host:
|
||||
hostname: "{{ item['ips'][0].public_ip }}"
|
||||
groupname: azure_vms
|
||||
loop: "{{ azure.deployment.instances }}"
|
||||
|
||||
# Deploy an Azure WebApp running a hello world'ish node app
|
||||
- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
|
||||
azure_rm_deployment:
|
||||
resource_group: myResourceGroup
|
||||
name: myDeployment
|
||||
parameters:
|
||||
repoURL:
|
||||
value: 'https://github.com/devigned/az-roadshow-oss.git'
|
||||
siteName:
|
||||
value: devopscleweb
|
||||
hostingPlanName:
|
||||
value: someplan
|
||||
siteLocation:
|
||||
value: westus
|
||||
sku:
|
||||
value: Standard
|
||||
template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
|
||||
|
||||
# Create or update a template deployment based on an inline template and parameters
|
||||
- name: Create Azure Deploy
|
||||
azure_rm_deployment:
|
||||
resource_group: myResourceGroup
|
||||
name: myDeployment
|
||||
template:
|
||||
$schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
|
||||
contentVersion: "1.0.0.0"
|
||||
parameters:
|
||||
newStorageAccountName:
|
||||
type: "string"
|
||||
metadata:
|
||||
description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
|
||||
adminUsername:
|
||||
type: "string"
|
||||
metadata:
|
||||
description: "User name for the Virtual Machine."
|
||||
adminPassword:
|
||||
type: "securestring"
|
||||
metadata:
|
||||
description: "Password for the Virtual Machine."
|
||||
dnsNameForPublicIP:
|
||||
type: "string"
|
||||
metadata:
|
||||
description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
|
||||
ubuntuOSVersion:
|
||||
type: "string"
|
||||
defaultValue: "14.04.2-LTS"
|
||||
allowedValues:
|
||||
- "12.04.5-LTS"
|
||||
- "14.04.2-LTS"
|
||||
- "15.04"
|
||||
metadata:
|
||||
description: >
|
||||
The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version.
|
||||
Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04.
|
||||
variables:
|
||||
location: "West US"
|
||||
imagePublisher: "Canonical"
|
||||
imageOffer: "UbuntuServer"
|
||||
OSDiskName: "osdiskforlinuxsimple"
|
||||
nicName: "myVMNic"
|
||||
addressPrefix: "192.0.2.0/24"
|
||||
subnetName: "Subnet"
|
||||
subnetPrefix: "192.0.2.0/24"
|
||||
storageAccountType: "Standard_LRS"
|
||||
publicIPAddressName: "myPublicIP"
|
||||
publicIPAddressType: "Dynamic"
|
||||
vmStorageAccountContainerName: "vhds"
|
||||
vmName: "MyUbuntuVM"
|
||||
vmSize: "Standard_D1"
|
||||
virtualNetworkName: "MyVNET"
|
||||
vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
|
||||
subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
|
||||
resources:
|
||||
- type: "Microsoft.Storage/storageAccounts"
|
||||
name: "[parameters('newStorageAccountName')]"
|
||||
apiVersion: "2015-05-01-preview"
|
||||
location: "[variables('location')]"
|
||||
properties:
|
||||
accountType: "[variables('storageAccountType')]"
|
||||
- apiVersion: "2015-05-01-preview"
|
||||
type: "Microsoft.Network/publicIPAddresses"
|
||||
name: "[variables('publicIPAddressName')]"
|
||||
location: "[variables('location')]"
|
||||
properties:
|
||||
publicIPAllocationMethod: "[variables('publicIPAddressType')]"
|
||||
dnsSettings:
|
||||
domainNameLabel: "[parameters('dnsNameForPublicIP')]"
|
||||
- type: "Microsoft.Network/virtualNetworks"
|
||||
apiVersion: "2015-05-01-preview"
|
||||
name: "[variables('virtualNetworkName')]"
|
||||
location: "[variables('location')]"
|
||||
properties:
|
||||
addressSpace:
|
||||
addressPrefixes:
|
||||
- "[variables('addressPrefix')]"
|
||||
subnets:
|
||||
-
|
||||
name: "[variables('subnetName')]"
|
||||
properties:
|
||||
addressPrefix: "[variables('subnetPrefix')]"
|
||||
- type: "Microsoft.Network/networkInterfaces"
|
||||
apiVersion: "2015-05-01-preview"
|
||||
name: "[variables('nicName')]"
|
||||
location: "[variables('location')]"
|
||||
dependsOn:
|
||||
- "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
|
||||
- "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
|
||||
properties:
|
||||
ipConfigurations:
|
||||
-
|
||||
name: "ipconfig1"
|
||||
properties:
|
||||
privateIPAllocationMethod: "Dynamic"
|
||||
publicIPAddress:
|
||||
id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
|
||||
subnet:
|
||||
id: "[variables('subnetRef')]"
|
||||
- type: "Microsoft.Compute/virtualMachines"
|
||||
apiVersion: "2015-06-15"
|
||||
name: "[variables('vmName')]"
|
||||
location: "[variables('location')]"
|
||||
dependsOn:
|
||||
- "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
|
||||
- "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
|
||||
properties:
|
||||
hardwareProfile:
|
||||
vmSize: "[variables('vmSize')]"
|
||||
osProfile:
|
||||
computername: "[variables('vmName')]"
|
||||
adminUsername: "[parameters('adminUsername')]"
|
||||
adminPassword: "[parameters('adminPassword')]"
|
||||
storageProfile:
|
||||
imageReference:
|
||||
publisher: "[variables('imagePublisher')]"
|
||||
offer: "[variables('imageOffer')]"
|
||||
sku: "[parameters('ubuntuOSVersion')]"
|
||||
version: "latest"
|
||||
osDisk:
|
||||
name: "osdisk"
|
||||
vhd:
|
||||
uri: >
|
||||
[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',
|
||||
variables('OSDiskName'),'.vhd')]
|
||||
caching: "ReadWrite"
|
||||
createOption: "FromImage"
|
||||
networkProfile:
|
||||
networkInterfaces:
|
||||
-
|
||||
id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
|
||||
diagnosticsProfile:
|
||||
bootDiagnostics:
|
||||
enabled: "true"
|
||||
storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
|
||||
parameters:
|
||||
newStorageAccountName:
|
||||
value: devopsclestorage
|
||||
adminUsername:
|
||||
value: devopscle
|
||||
adminPassword:
|
||||
value: Password1!
|
||||
dnsNameForPublicIP:
|
||||
value: devopscleazure
|
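# Hypothetical example (not part of the original documentation): start a deployment
# without blocking until it finishes.
- name: Create Azure Deploy without waiting for completion
  azure_rm_deployment:
    resource_group: myResourceGroup
    name: myDeployment
    template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
    parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
    wait_for_deployment_completion: no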
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
deployment:
|
||||
description: Deployment details.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
group_name:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
type: str
|
||||
returned: always
|
||||
sample: myResourceGroup
|
||||
id:
|
||||
description:
|
||||
- The Azure ID of the deployment.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Resources/deployments/myDeployment"
|
||||
instances:
|
||||
description:
|
||||
- Provides the public IP addresses for each VM instance.
|
||||
type: list
|
||||
returned: always
|
||||
contains:
|
||||
ips:
|
||||
description:
|
||||
- List of Public IP addresses.
|
||||
type: list
|
||||
returned: always
|
||||
contains:
|
||||
dns_settings:
|
||||
description:
|
||||
- DNS Settings.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
domain_name_label:
|
||||
description:
|
||||
- Domain Name Label.
|
||||
type: str
|
||||
returned: always
|
||||
sample: myvirtualmachine
|
||||
fqdn:
|
||||
description:
|
||||
- Fully Qualified Domain Name.
|
||||
type: str
|
||||
returned: always
|
||||
sample: myvirtualmachine.eastus2.cloudapp.azure.com
|
||||
id:
|
||||
description:
|
||||
- Public IP resource id.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/p
|
||||
ublicIPAddresses/myPublicIP"
|
||||
name:
|
||||
description:
|
||||
- Public IP resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myPublicIP
|
||||
public_ip:
|
||||
description:
|
||||
- Public IP address value.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 104.209.244.123
|
||||
public_ip_allocation_method:
|
||||
description:
|
||||
- Public IP allocation method.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Dynamic
|
||||
vm_name:
|
||||
description:
|
||||
- Virtual machine name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myvirtualmachine
|
||||
name:
|
||||
description:
|
||||
- Name of the deployment.
|
||||
type: str
|
||||
returned: always
|
||||
sample: myDeployment
|
||||
outputs:
|
||||
description:
|
||||
- Dictionary of outputs received from the deployment.
|
||||
type: complex
|
||||
returned: always
|
||||
sample: { "hostname": { "type": "String", "value": "myvirtualmachine.eastus2.cloudapp.azure.com" } }
|
||||
'''
|
||||
|
||||
import time
|
||||
|
||||
try:
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
import time
|
||||
import yaml
|
||||
except ImportError as exc:
|
||||
IMPORT_ERROR = "Error importing module prerequisites: %s" % exc
|
||||
|
||||
try:
|
||||
from itertools import chain
|
||||
from azure.common.exceptions import CloudError
|
||||
from azure.mgmt.resource.resources import ResourceManagementClient
|
||||
from azure.mgmt.network import NetworkManagementClient
|
||||
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
|
||||
class AzureRMDeploymentManager(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
|
||||
name=dict(type='str', default="ansible-arm", aliases=['deployment_name']),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
template=dict(type='dict', default=None),
|
||||
parameters=dict(type='dict', default=None),
|
||||
template_link=dict(type='str', default=None),
|
||||
parameters_link=dict(type='str', default=None),
|
||||
location=dict(type='str', default="westus"),
|
||||
deployment_mode=dict(type='str', default='incremental', choices=['complete', 'incremental']),
|
||||
wait_for_deployment_completion=dict(type='bool', default=True),
|
||||
wait_for_deployment_polling_period=dict(type='int', default=10)
|
||||
)
|
||||
|
||||
mutually_exclusive = [('template', 'template_link'),
|
||||
('parameters', 'parameters_link')]
|
||||
|
||||
self.resource_group = None
|
||||
self.state = None
|
||||
self.template = None
|
||||
self.parameters = None
|
||||
self.template_link = None
|
||||
self.parameters_link = None
|
||||
self.location = None
|
||||
self.deployment_mode = None
|
||||
self.name = None
|
||||
self.wait_for_deployment_completion = None
|
||||
self.wait_for_deployment_polling_period = None
|
||||
self.tags = None
|
||||
self.append_tags = None
|
||||
|
||||
self.results = dict(
|
||||
deployment=dict(),
|
||||
changed=False,
|
||||
msg=""
|
||||
)
|
||||
|
||||
super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
mutually_exclusive=mutually_exclusive,
|
||||
supports_check_mode=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['append_tags', 'tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.state == 'present':
|
||||
deployment = self.deploy_template()
|
||||
if deployment is None:
|
||||
self.results['deployment'] = dict(
|
||||
name=self.name,
|
||||
group_name=self.resource_group,
|
||||
id=None,
|
||||
outputs=None,
|
||||
instances=None
|
||||
)
|
||||
else:
|
||||
self.results['deployment'] = dict(
|
||||
name=deployment.name,
|
||||
group_name=self.resource_group,
|
||||
id=deployment.id,
|
||||
outputs=deployment.properties.outputs,
|
||||
instances=self._get_instances(deployment)
|
||||
)
|
||||
|
||||
self.results['changed'] = True
|
||||
self.results['msg'] = 'deployment succeeded'
|
||||
else:
|
||||
try:
|
||||
if self.get_resource_group(self.resource_group):
|
||||
self.destroy_resource_group()
|
||||
self.results['changed'] = True
|
||||
self.results['msg'] = "deployment deleted"
|
||||
except CloudError:
|
||||
# resource group does not exist
|
||||
pass
|
||||
|
||||
return self.results
|
||||
|
||||
def deploy_template(self):
|
||||
"""
|
||||
Deploy the targeted template and parameters
|
||||
:param module: Ansible module containing the validated configuration for the deployment template
|
||||
:param client: resource management client for azure
|
||||
:param conn_info: connection info needed
|
||||
:return:
|
||||
"""
|
||||
|
||||
deploy_parameter = self.rm_models.DeploymentProperties(mode=self.deployment_mode)
|
||||
if not self.parameters_link:
|
||||
deploy_parameter.parameters = self.parameters
|
||||
else:
|
||||
deploy_parameter.parameters_link = self.rm_models.ParametersLink(
|
||||
uri=self.parameters_link
|
||||
)
|
||||
if not self.template_link:
|
||||
deploy_parameter.template = self.template
|
||||
else:
|
||||
deploy_parameter.template_link = self.rm_models.TemplateLink(
|
||||
uri=self.template_link
|
||||
)
|
||||
|
||||
if self.append_tags and self.tags:
|
||||
try:
|
||||
# fetch the RG directly (instead of using the base helper) since we don't want to exit if it's missing
|
||||
rg = self.rm_client.resource_groups.get(self.resource_group)
|
||||
if rg.tags:
|
||||
self.tags = dict(self.tags, **rg.tags)
|
||||
except CloudError:
|
||||
# resource group does not exist
|
||||
pass
|
||||
|
||||
params = self.rm_models.ResourceGroup(location=self.location, tags=self.tags)
|
||||
|
||||
try:
|
||||
self.rm_client.resource_groups.create_or_update(self.resource_group, params)
|
||||
except CloudError as exc:
|
||||
self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
|
||||
(exc.status_code, exc.message))
|
||||
try:
|
||||
result = self.rm_client.deployments.create_or_update(self.resource_group,
|
||||
self.name,
|
||||
deploy_parameter)
|
||||
|
||||
deployment_result = None
|
||||
if self.wait_for_deployment_completion:
|
||||
deployment_result = self.get_poller_result(result)
|
||||
while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
|
||||
'Succeeded']:
|
||||
time.sleep(self.wait_for_deployment_polling_period)
|
||||
deployment_result = self.rm_client.deployments.get(self.resource_group, self.name)
|
||||
except CloudError as exc:
|
||||
failed_deployment_operations = self._get_failed_deployment_operations(self.name)
|
||||
self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
|
||||
self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
|
||||
failed_deployment_operations=failed_deployment_operations)
|
||||
|
||||
if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
|
||||
self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
|
||||
failed_deployment_operations = self._get_failed_deployment_operations(self.name)
|
||||
self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
|
||||
failed_deployment_operations=failed_deployment_operations)
|
||||
|
||||
return deployment_result
|
||||
|
||||
def destroy_resource_group(self):
|
||||
"""
|
||||
Destroy the targeted resource group
|
||||
"""
|
||||
try:
|
||||
result = self.rm_client.resource_groups.delete(self.resource_group)
|
||||
result.wait() # Blocking wait till the delete is finished
|
||||
except CloudError as e:
|
||||
if e.status_code == 404 or e.status_code == 204:
|
||||
return
|
||||
else:
|
||||
self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
|
||||
(e.status_code, e.message))
|
||||
|
||||
def _get_failed_nested_operations(self, current_operations):
|
||||
new_operations = []
|
||||
for operation in current_operations:
|
||||
if operation.properties.provisioning_state == 'Failed':
|
||||
new_operations.append(operation)
|
||||
if operation.properties.target_resource and \
|
||||
'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
|
||||
nested_deployment = operation.properties.target_resource.resource_name
|
||||
try:
|
||||
nested_operations = self.rm_client.deployment_operations.list(self.resource_group,
|
||||
nested_deployment)
|
||||
except CloudError as exc:
|
||||
self.fail("List nested deployment operations failed with status code: %s and message: %s" %
|
||||
(exc.status_code, exc.message))
|
||||
new_nested_operations = self._get_failed_nested_operations(nested_operations)
|
||||
new_operations += new_nested_operations
|
||||
return new_operations
|
||||
|
||||
def _get_failed_deployment_operations(self, name):
|
||||
results = []
|
||||
# time.sleep(15) # there is a race condition between when we ask for deployment status and when the
|
||||
# # status is available.
|
||||
|
||||
try:
|
||||
operations = self.rm_client.deployment_operations.list(self.resource_group, name)
|
||||
except CloudError as exc:
|
||||
self.fail("Get deployment failed with status code: %s and message: %s" %
|
||||
(exc.status_code, exc.message))
|
||||
try:
|
||||
results = [
|
||||
dict(
|
||||
id=op.id,
|
||||
operation_id=op.operation_id,
|
||||
status_code=op.properties.status_code,
|
||||
status_message=op.properties.status_message,
|
||||
target_resource=dict(
|
||||
id=op.properties.target_resource.id,
|
||||
resource_name=op.properties.target_resource.resource_name,
|
||||
resource_type=op.properties.target_resource.resource_type
|
||||
) if op.properties.target_resource else None,
|
||||
provisioning_state=op.properties.provisioning_state,
|
||||
)
|
||||
for op in self._get_failed_nested_operations(operations)
|
||||
]
|
||||
except Exception:
|
||||
# If we fail here, the original error gets lost and the user receives the wrong error message/stacktrace
|
||||
pass
|
||||
self.log(dict(failed_deployment_operations=results), pretty_print=True)
|
||||
return results
|
||||
|
||||
def _get_instances(self, deployment):
|
||||
dep_tree = self._build_hierarchy(deployment.properties.dependencies)
|
||||
vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
|
||||
vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
|
||||
for vm in vms]
|
||||
vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
|
||||
for vm, nics in vms_and_nics]
|
||||
return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
|
||||
for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]
|
||||
|
||||
def _get_dependencies(self, dep_tree, resource_type):
|
||||
matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
|
||||
for child_tree in [value['children'] for value in dep_tree.values()]:
|
||||
matches += self._get_dependencies(child_tree, resource_type)
|
||||
return matches
|
||||
|
||||
def _build_hierarchy(self, dependencies, tree=None):
|
||||
tree = dict(top=True) if tree is None else tree
|
||||
for dep in dependencies:
|
||||
if dep.resource_name not in tree:
|
||||
tree[dep.resource_name] = dict(dep=dep, children=dict())
|
||||
if isinstance(dep, self.rm_models.Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
|
||||
self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])
|
||||
|
||||
if 'top' in tree:
|
||||
tree.pop('top', None)
|
||||
keys = list(tree.keys())
|
||||
for key1 in keys:
|
||||
for key2 in keys:
|
||||
if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
|
||||
tree[key2]['children'][key1] = tree[key1]
|
||||
tree.pop(key1)
|
||||
return tree
|
||||
|
||||
def _get_ip_dict(self, ip):
|
||||
ip_dict = dict(name=ip.name,
|
||||
id=ip.id,
|
||||
public_ip=ip.ip_address,
|
||||
public_ip_allocation_method=str(ip.public_ip_allocation_method)
|
||||
)
|
||||
if ip.dns_settings:
|
||||
ip_dict['dns_settings'] = {
|
||||
'domain_name_label': ip.dns_settings.domain_name_label,
|
||||
'fqdn': ip.dns_settings.fqdn
|
||||
}
|
||||
return ip_dict
|
||||
|
||||
def _nic_to_public_ips_instance(self, nics):
|
||||
return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1])
|
||||
for nic_obj in (self.network_client.network_interfaces.get(self.resource_group,
|
||||
nic['dep'].resource_name) for nic in nics)
|
||||
for public_ip_id in [ip_conf_instance.public_ip_address.id
|
||||
for ip_conf_instance in nic_obj.ip_configurations
|
||||
if ip_conf_instance.public_ip_address]]
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDeploymentManager()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,249 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_deployment_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Deployment facts
|
||||
description:
|
||||
- Get facts of Azure Deployment.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the deployment.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of Deployment
|
||||
azure_rm_deployment_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myDeployment
|
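# Hypothetical example (not part of the original documentation): omit the deployment
# name to list every deployment in the resource group.
- name: List deployments in a resource group
  azure_rm_deployment_info:
    resource_group: myResourceGroup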
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
deployments:
|
||||
description:
|
||||
- A list of dictionaries containing facts for deployments.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Resources/deployments/myDeployment"
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group name.
|
||||
returned: always
|
||||
sample: myResourceGroup
|
||||
name:
|
||||
description:
|
||||
- Deployment name.
|
||||
returned: always
|
||||
sample: myDeployment
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of the deployment.
|
||||
returned: always
|
||||
sample: Succeeded
|
||||
template_link:
|
||||
description:
|
||||
- Link to the template.
|
||||
returned: always
|
||||
sample: "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/d01a5c06f4f1bc03a049ca17bbbd6e06d62657b3/101-vm-simple-linux/
|
||||
azuredeploy.json"
|
||||
parameters:
|
||||
description:
|
||||
- Dictionary containing deployment parameters.
|
||||
returned: always
|
||||
type: complex
|
||||
outputs:
|
||||
description:
|
||||
- Dictionary containing deployment outputs.
|
||||
returned: always
|
||||
output_resources:
|
||||
description:
|
||||
- List of resources.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource id.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkI
|
||||
nterfaces/myNetworkInterface"
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myNetworkInterface
|
||||
type:
|
||||
description:
|
||||
- Resource type.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Microsoft.Network/networkInterfaces
|
||||
depends_on:
|
||||
description:
|
||||
- List of resource ids.
|
||||
type: list
|
||||
returned: always
|
||||
sample:
|
||||
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGropup/providers/Microsoft.Network/virtualNet
|
||||
works/myVirtualNetwork"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDeploymentInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
|
||||
super(AzureRMDeploymentInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_deployment_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_deployment_facts' module has been renamed to 'azure_rm_deployment_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name:
|
||||
self.results['deployments'] = self.get()
|
||||
else:
|
||||
self.results['deployments'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.rm_client.deployments.get(self.resource_group, deployment_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Deployment.')
|
||||
|
||||
if response:
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.rm_client.deployments.list(self.resource_group)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Deployment.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
output_resources = {}
|
||||
for dependency in (d.get('properties', {}).get('dependencies') or []):
|
||||
# go through dependent resources
|
||||
depends_on = []
|
||||
for depends_on_resource in dependency['depends_on']:
|
||||
depends_on.append(depends_on_resource['id'])
|
||||
# append if not in list
|
||||
if not output_resources.get(depends_on_resource['id']):
|
||||
sub_resource = {
|
||||
'id': depends_on_resource['id'],
|
||||
'name': depends_on_resource['resource_name'],
|
||||
'type': depends_on_resource['resource_type'],
|
||||
'depends_on': []
|
||||
}
|
||||
output_resources[depends_on_resource['id']] = sub_resource
|
||||
resource = {
|
||||
'id': dependency['id'],
|
||||
'name': dependency['resource_name'],
|
||||
'type': dependency['resource_type'],
|
||||
'depends_on': depends_on
|
||||
}
|
||||
output_resources[dependency['id']] = resource
|
||||
|
||||
# convert dictionary to list
|
||||
output_resources_list = []
|
||||
for r in output_resources:
|
||||
output_resources_list.append(output_resources[r])
|
||||
|
||||
d = {
|
||||
'id': d.get('id'),
|
||||
'resource_group': self.resource_group,
|
||||
'name': d.get('name'),
|
||||
'provisioning_state': d.get('properties', {}).get('provisioning_state'),
|
||||
'parameters': d.get('properties', {}).get('parameters'),
|
||||
'outputs': d.get('properties', {}).get('outputs'),
|
||||
'output_resources': output_resources_list,
|
||||
'template_link': (d.get('properties', {}).get('template_link') or {}).get('uri')
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDeploymentInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,284 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlab
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure DevTest Lab instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure DevTest Lab.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
location:
|
||||
description:
|
||||
- The location of the resource.
|
||||
storage_type:
|
||||
description:
|
||||
- Type of storage used by the lab. It can be either C(premium) or C(standard).
|
||||
choices:
|
||||
- 'standard'
|
||||
- 'premium'
|
||||
premium_data_disks:
|
||||
description:
|
||||
- Allow creation of premium data disks.
|
||||
type: bool
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the DevTest Lab.
|
||||
- Use C(present) to create or update a DevTest Lab and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create (or update) DevTest Lab
|
||||
azure_rm_devtestlab:
|
||||
resource_group: myResourceGroup
|
||||
name: mylab
|
||||
storage_type: standard
|
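# Hypothetical additional examples (not part of the original documentation).
- name: Create DevTest Lab with premium data disks
  azure_rm_devtestlab:
    resource_group: myResourceGroup
    name: mylab
    storage_type: premium
    premium_data_disks: yes

- name: Delete DevTest Lab
  azure_rm_devtestlab:
    resource_group: myResourceGroup
    name: mylab
    state: absent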
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/mylab
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMDevTestLab(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM DevTest Lab resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
storage_type=dict(
|
||||
type='str',
|
||||
choices=['standard',
|
||||
'premium']
|
||||
),
|
||||
premium_data_disks=dict(
|
||||
type='bool'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.lab = {}
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMDevTestLab, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.lab[key] = kwargs[key]
|
||||
|
||||
if self.lab.get('storage_type'):
|
||||
self.lab['lab_storage_type'] = _snake_to_camel(self.lab['storage_type'], True)
|
||||
self.lab.pop('storage_type', None)
|
||||
if self.lab.get('premium_data_disks') is not None:
|
||||
self.lab['premium_data_disks'] = 'Enabled' if self.lab['premium_data_disks'] else 'Disabled'
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version='2018-10-15')
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if self.lab.get('location') is None:
|
||||
self.lab['location'] = resource_group.location
|
||||
|
||||
old_response = self.get_devtestlab()
|
||||
|
||||
if not old_response:
|
||||
self.log("DevTest Lab instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("DevTest Lab instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
if self.lab.get('lab_storage_type') is not None and \
|
||||
self.lab.get('lab_storage_type').lower() != old_response.get('lab_storage_type', '').lower():
|
||||
self.to_do = Actions.Update
|
||||
if (self.lab.get('premium_data_disks') is not None and
self.lab.get('premium_data_disks').lower() != old_response.get('premium_data_disks', '').lower()):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the DevTest Lab instance")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
response = self.create_update_devtestlab()
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("DevTest Lab instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_devtestlab()
|
||||
# This currently doesn't work as there is a bug in the SDK / service
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
else:
|
||||
self.log("DevTest Lab instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None)
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_devtestlab(self):
|
||||
'''
|
||||
Creates or updates DevTest Lab with the specified configuration.
|
||||
|
||||
:return: deserialized DevTest Lab instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the DevTest Lab instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.labs.create_or_update(resource_group_name=self.resource_group,
|
||||
name=self.name,
|
||||
lab=self.lab)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the DevTest Lab instance.')
|
||||
self.fail("Error creating the DevTest Lab instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_devtestlab(self):
|
||||
'''
|
||||
Deletes specified DevTest Lab instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the DevTest Lab instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.labs.delete(resource_group_name=self.resource_group,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the DevTest Lab instance.')
|
||||
self.fail("Error deleting the DevTest Lab instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_devtestlab(self):
|
||||
'''
|
||||
Gets the properties of the specified DevTest Lab.
|
||||
|
||||
:return: deserialized DevTest Lab instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the DevTest Lab instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.labs.get(resource_group_name=self.resource_group,
|
||||
name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("DevTest Lab instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the DevTest Lab instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMDevTestLab()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,272 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlab_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure DevTest Lab facts
|
||||
description:
|
||||
- Get facts of Azure DevTest Lab.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List instances of DevTest Lab by resource group
|
||||
azure_rm_devtestlab_info:
|
||||
resource_group: testrg
|
||||
|
||||
- name: List instances of DevTest Lab in subscription
|
||||
azure_rm_devtestlab_info:
|
||||
|
||||
- name: Get instance of DevTest Lab
|
||||
azure_rm_devtestlab_info:
|
||||
resource_group: testrg
|
||||
name: testlab
|
||||
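# Additional sketch (assumed tag value): limit results by tag and keep the facts for later tasks.
- name: List DevTest Labs in a resource group filtered by tag
  azure_rm_devtestlab_info:
    resource_group: testrg
    tags:
      - team:dev
  register: devtestlab_facts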
'''
|
||||
|
||||
RETURN = '''
|
||||
labs:
|
||||
description:
|
||||
- A list of dictionaries containing facts for Lab.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testrg
|
||||
name:
|
||||
description:
|
||||
- The name of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testlab
|
||||
location:
|
||||
description:
|
||||
- The location of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: eastus
|
||||
storage_type:
|
||||
description:
|
||||
- Lab storage type.
|
||||
returned: always
|
||||
type: str
|
||||
sample: standard
|
||||
premium_data_disks:
|
||||
description:
|
||||
- Whether premium data disks are allowed.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: false
|
||||
provisioning_state:
|
||||
description:
|
||||
- Lab provisioning state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
artifacts_storage_account:
|
||||
description:
|
||||
- Artifacts storage account ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346
|
||||
default_premium_storage_account:
|
||||
description:
|
||||
- Default premium storage account ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346
|
||||
default_storage_account:
|
||||
description:
|
||||
- Default storage account ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346
|
||||
premium_data_disk_storage_account:
|
||||
description:
|
||||
- Premium data disk storage account ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346
|
||||
vault_name:
|
||||
description:
|
||||
- Key vault ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/myLab6788
|
||||
tags:
|
||||
description:
|
||||
- The tags of the resource.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: "{ 'MyTag': 'MyValue' }"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDevTestLabInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str'
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
super(AzureRMDevTestLabInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlab_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlab_facts' module has been renamed to 'azure_rm_devtestlab_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.resource_group is not None:
|
||||
if self.name is not None:
|
||||
self.results['labs'] = self.get()
|
||||
else:
|
||||
self.results['labs'] = self.list_by_resource_group()
|
||||
else:
|
||||
self.results['labs'] = self.list_by_subscription()
|
||||
return self.results
|
||||
|
||||
def list_by_resource_group(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.labs.list_by_resource_group(resource_group_name=self.resource_group)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Lab.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_subscription(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.labs.list_by_subscription()
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Lab.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.labs.get(resource_group_name=self.resource_group,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Lab.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'id': d.get('id', None),
|
||||
'resource_group': self.resource_group,
|
||||
'name': d.get('name', None),
|
||||
'location': d.get('location', '').replace(' ', '').lower(),
|
||||
'storage_type': d.get('lab_storage_type', '').lower(),
|
||||
'premium_data_disks': d.get('premium_data_disks') == 'Enabled',
|
||||
'provisioning_state': d.get('provisioning_state'),
|
||||
'artifacts_storage_account': d.get('artifacts_storage_account'),
|
||||
'default_premium_storage_account': d.get('default_premium_storage_account'),
|
||||
'default_storage_account': d.get('default_storage_account'),
|
||||
'premium_data_disk_storage_account': d.get('premium_data_disk_storage_account'),
|
||||
'vault_name': d.get('vault_name'),
|
||||
'tags': d.get('tags', None)
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDevTestLabInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,226 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabarmtemplate_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure DevTest Lab ARM Template facts
|
||||
description:
|
||||
- Get facts of Azure DevTest Lab ARM Template.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
type: str
|
||||
artifact_source_name:
|
||||
description:
|
||||
- The name of the artifact source.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the ARM template.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get information on DevTest Lab ARM Template
|
||||
azure_rm_devtestlabarmtemplate_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
artifact_source_name: public environment repo
|
||||
name: WebApp
|
||||
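# Additional sketch (assumed names): omit name to list every ARM template of the artifact source.
- name: List all ARM templates of an artifact source
  azure_rm_devtestlabarmtemplate_info:
    resource_group: myResourceGroup
    lab_name: myLab
    artifact_source_name: public environment repo
  register: arm_template_facts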
'''
|
||||
|
||||
RETURN = '''
|
||||
armtemplates:
|
||||
description:
|
||||
- A list of dictionaries containing facts for DevTest Lab ARM Template.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/art
|
||||
ifactSources/public environment repo/armTemplates/WebApp"
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group name.
|
||||
returned: always
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- DevTest Lab name.
|
||||
returned: always
|
||||
sample: myLab
|
||||
artifact_source_name:
|
||||
description:
|
||||
- Artifact source name.
|
||||
returned: always
|
||||
sample: public environment repo
|
||||
name:
|
||||
description:
|
||||
- ARM Template name.
|
||||
returned: always
|
||||
sample: WebApp
|
||||
display_name:
|
||||
description:
|
||||
- The display name of the ARM template.
returned: always
|
||||
sample: Web App
|
||||
description:
|
||||
description:
|
||||
- The description of the ARM template.
returned: always
|
||||
sample: This template creates an Azure Web App without a data store.
|
||||
publisher:
|
||||
description:
|
||||
- The publisher of the ARM template.
returned: always
|
||||
sample: Microsoft
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDtlArmTemplateInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
artifact_source_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.artifact_source_name = None
|
||||
self.name = None
|
||||
super(AzureRMDtlArmTemplateInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlabarmtemplate_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlabarmtemplate_facts' module has been renamed to 'azure_rm_devtestlabarmtemplate_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name:
|
||||
self.results['armtemplates'] = self.get()
|
||||
else:
|
||||
self.results['armtemplates'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.arm_templates.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
artifact_source_name=self.artifact_source_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for DTL ARM Template.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.arm_templates.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
artifact_source_name=self.artifact_source_name,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for DTL ARM Template.')
|
||||
|
||||
if response:
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
|
||||
'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
|
||||
'artifact_source_name': self.parse_resource_to_dict(d.get('id')).get('child_name_1'),
|
||||
'id': d.get('id', None),
|
||||
'name': d.get('name'),
|
||||
'display_name': d.get('display_name'),
|
||||
'description': d.get('description'),
|
||||
'publisher': d.get('publisher')
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDtlArmTemplateInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,250 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabartifact_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure DevTest Lab Artifact facts
|
||||
description:
|
||||
- Get facts of Azure DevTest Lab Artifact.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
type: str
|
||||
artifact_source_name:
|
||||
description:
|
||||
- The name of the artifact source.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the artifact.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of DevTest Lab Artifact
|
||||
azure_rm_devtestlabartifact_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
artifact_source_name: myArtifactSource
|
||||
name: myArtifact
|
||||
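# Additional sketch (assumed names): omit name to list every artifact of the artifact source.
- name: List all artifacts of an artifact source
  azure_rm_devtestlabartifact_info:
    resource_group: myResourceGroup
    lab_name: myLab
    artifact_source_name: myArtifactSource
  register: artifact_facts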
'''
|
||||
|
||||
RETURN = '''
|
||||
artifacts:
|
||||
description:
|
||||
- A list of dictionaries containing facts for DevTest Lab Artifact.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The identifier of the artifact.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/ar
|
||||
tifactSources/myArtifactSource/artifacts/myArtifact"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- Name of the lab.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab
|
||||
artifact_source_name:
|
||||
description:
|
||||
- The name of the artifact source.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myArtifactSource
|
||||
name:
|
||||
description:
|
||||
- The name of the artifact.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myArtifact
|
||||
description:
|
||||
description:
|
||||
- Description of the artifact.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Installs My Software
|
||||
file_path:
|
||||
description:
|
||||
- Artifact's path in the repo.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Artifacts/myArtifact
|
||||
publisher:
|
||||
description:
|
||||
- Publisher name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: MyPublisher
|
||||
target_os_type:
|
||||
description:
|
||||
- Target OS type.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Linux
|
||||
title:
|
||||
description:
|
||||
- Title of the artifact.
|
||||
returned: always
|
||||
type: str
|
||||
sample: My Software
|
||||
parameters:
|
||||
description:
|
||||
- A dictionary containing parameters definition of the artifact.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: {}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMArtifactInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
artifact_source_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.artifact_source_name = None
|
||||
self.name = None
|
||||
super(AzureRMArtifactInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name:
|
||||
self.results['artifacts'] = self.get()
|
||||
else:
|
||||
self.results['artifacts'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.artifacts.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
artifact_source_name=self.artifact_source_name,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Artifact.')
|
||||
|
||||
if response:
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.artifacts.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
artifact_source_name=self.artifact_source_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Artifact.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
|
||||
'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
|
||||
'artifact_source_name': self.parse_resource_to_dict(d.get('id')).get('child_name_1'),
|
||||
'id': d.get('id'),
|
||||
'description': d.get('description'),
|
||||
'file_path': d.get('file_path'),
|
||||
'name': d.get('name'),
|
||||
'parameters': d.get('parameters'),
|
||||
'publisher': d.get('publisher'),
|
||||
'target_os_type': d.get('target_os_type'),
|
||||
'title': d.get('title')
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMArtifactInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,365 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabartifactsource
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure DevTest Labs Artifacts Source instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure DevTest Labs Artifacts Source.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the artifact source.
|
||||
required: True
|
||||
display_name:
|
||||
description:
|
||||
- The artifact source's display name.
|
||||
uri:
|
||||
description:
|
||||
- The artifact source's URI.
|
||||
source_type:
|
||||
description:
|
||||
- The artifact source's type.
|
||||
choices:
|
||||
- 'vso'
|
||||
- 'github'
|
||||
folder_path:
|
||||
description:
|
||||
- The folder containing artifacts.
|
||||
arm_template_folder_path:
|
||||
description:
|
||||
- The folder containing Azure Resource Manager templates.
|
||||
branch_ref:
|
||||
description:
|
||||
- The artifact source's branch reference.
|
||||
security_token:
|
||||
description:
|
||||
- The security token to authenticate to the artifact source.
|
||||
is_enabled:
|
||||
description:
|
||||
- Indicates whether the artifact source is enabled.
|
||||
type: bool
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the DevTest Labs Artifacts Source.
|
||||
- Use C(present) to create or update a DevTest Labs Artifacts Source and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create (or update) DevTest Labs Artifacts Source
|
||||
azure_rm_devtestlabartifactsource:
|
||||
resource_group: myrg
|
||||
lab_name: mylab
|
||||
name: myartifacts
|
||||
uri: https://github.com/myself/myrepo.git
|
||||
source_type: github
|
||||
folder_path: /
|
||||
security_token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
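# Additional sketch (assumed resource names): removing the artifact source again; the
# source_type / uri / security_token options are only required when state is present.
- name: Delete DevTest Labs Artifacts Source
  azure_rm_devtestlabartifactsource:
    resource_group: myrg
    lab_name: mylab
    name: myartifacts
    state: absent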
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myrg/providers/microsoft.devtestlab/labs/mylab/artifactsources/myartifacts
|
||||
is_enabled:
|
||||
description:
|
||||
- Indicates whether the artifact source is enabled.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: true
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMDevTestLabArtifactsSource(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM DevTest Labs Artifacts Source resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
display_name=dict(
|
||||
type='str'
|
||||
),
|
||||
uri=dict(
|
||||
type='str'
|
||||
),
|
||||
source_type=dict(
|
||||
type='str',
|
||||
choices=['vso',
|
||||
'github']
|
||||
),
|
||||
folder_path=dict(
|
||||
type='str'
|
||||
),
|
||||
arm_template_folder_path=dict(
|
||||
type='str'
|
||||
),
|
||||
branch_ref=dict(
|
||||
type='str'
|
||||
),
|
||||
security_token=dict(
|
||||
type='str'
|
||||
),
|
||||
is_enabled=dict(
|
||||
type='bool'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.artifact_source = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
required_if = [
|
||||
('state', 'present', [
|
||||
'source_type', 'uri', 'security_token'])
|
||||
]
|
||||
|
||||
super(AzureRMDevTestLabArtifactsSource, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.artifact_source[key] = kwargs[key]
|
||||
|
||||
if self.artifact_source.get('source_type') == 'github':
|
||||
self.artifact_source['source_type'] = 'GitHub'
|
||||
elif self.artifact_source.get('source_type') == 'vso':
|
||||
self.artifact_source['source_type'] = 'VsoGit'
|
||||
|
||||
# the module exposes is_enabled (bool); the DevTest Labs API expects status as 'Enabled' / 'Disabled'
if self.artifact_source.get('is_enabled') is not None:
    self.artifact_source['status'] = 'Enabled' if self.artifact_source.pop('is_enabled') else 'Disabled'
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version='2018-10-15')
|
||||
|
||||
old_response = self.get_devtestlabartifactssource()
|
||||
|
||||
if not old_response:
|
||||
self.log("DevTest Labs Artifacts Source instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("DevTest Labs Artifacts Source instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.results['old_response'] = old_response
|
||||
|
||||
if self.artifact_source.get('display_name') is not None:
|
||||
if self.artifact_source.get('display_name') != old_response.get('display_name'):
|
||||
self.to_do = Actions.Update
|
||||
else:
|
||||
self.artifact_source['display_name'] = old_response.get('display_name')
|
||||
|
||||
if self.artifact_source.get('source_type').lower() != old_response.get('source_type').lower():
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if self.artifact_source.get('uri') != old_response.get('uri'):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if self.artifact_source.get('branch_ref') is not None:
|
||||
if self.artifact_source.get('branch_ref') != old_response.get('branch_ref'):
|
||||
self.to_do = Actions.Update
|
||||
else:
|
||||
self.artifact_source['branch_ref'] = old_response.get('branch_ref')
|
||||
|
||||
if self.artifact_source.get('status') is not None:
|
||||
if self.artifact_source.get('status') != old_response.get('status'):
|
||||
self.to_do = Actions.Update
|
||||
else:
|
||||
self.artifact_source['status'] = old_response.get('status')
|
||||
|
||||
if self.artifact_source.get('folder_path') is not None:
|
||||
if self.artifact_source.get('folder_path') != old_response.get('folder_path'):
|
||||
self.to_do = Actions.Update
|
||||
else:
|
||||
self.artifact_source['folder_path'] = old_response.get('folder_path')
|
||||
|
||||
if self.artifact_source.get('arm_template_folder_path') is not None:
|
||||
if self.artifact_source.get('arm_template_folder_path') != old_response.get('arm_template_folder_path'):
|
||||
self.to_do = Actions.Update
|
||||
else:
|
||||
self.artifact_source['arm_template_folder_path'] = old_response.get('arm_template_folder_path')
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the DevTest Labs Artifacts Source instance")
|
||||
self.results['changed'] = True
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
response = self.create_update_devtestlabartifactssource()
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("DevTest Labs Artifacts Source instance deleted")
|
||||
self.results['changed'] = True
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
self.delete_devtestlabartifactssource()
|
||||
else:
|
||||
self.log("DevTest Labs Artifacts Source instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None),
|
||||
'is_enabled': ((response.get('status') or '').lower() == 'enabled')
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_devtestlabartifactssource(self):
|
||||
'''
|
||||
Creates or updates DevTest Labs Artifacts Source with the specified configuration.
|
||||
|
||||
:return: deserialized DevTest Labs Artifacts Source instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the DevTest Labs Artifacts Source instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.artifact_sources.create_or_update(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name,
|
||||
artifact_source=self.artifact_source)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the DevTest Labs Artifacts Source instance.')
|
||||
self.fail("Error creating the DevTest Labs Artifacts Source instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_devtestlabartifactssource(self):
|
||||
'''
|
||||
Deletes specified DevTest Labs Artifacts Source instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the DevTest Labs Artifacts Source instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.artifact_sources.delete(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the DevTest Labs Artifacts Source instance.')
|
||||
self.fail("Error deleting the DevTest Labs Artifacts Source instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_devtestlabartifactssource(self):
|
||||
'''
|
||||
Gets the properties of the specified DevTest Labs Artifacts Source.
|
||||
|
||||
:return: deserialized DevTest Labs Artifacts Source instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the DevTest Labs Artifacts Source instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.artifact_sources.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("DevTest Labs Artifacts Source instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the DevTest Labs Artifacts Source instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMDevTestLabArtifactsSource()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,258 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabartifactsource_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure DevTest Lab Artifact Source facts
|
||||
description:
|
||||
- Get facts of Azure DevTest Lab Artifact Source.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of DevTest Lab.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of DevTest Lab Artifact Source.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of DevTest Lab Artifact Source
|
||||
azure_rm_devtestlabartifactsource_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
name: myArtifactSource
|
||||
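# Additional sketch (assumed tag value): omit name to list every artifact source of the lab,
# optionally filtered by tag.
- name: List all artifact sources of a lab
  azure_rm_devtestlabartifactsource_info:
    resource_group: myResourceGroup
    lab_name: myLab
    tags:
      - env:dev
  register: artifact_source_facts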
'''
|
||||
|
||||
RETURN = '''
|
||||
artifactsources:
|
||||
description:
|
||||
- A list of dictionaries containing facts for DevTest Lab Artifact Source.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The identifier of the artifact source.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/ar
|
||||
tifactSources/myArtifactSource"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- Name of the lab.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab
|
||||
name:
|
||||
description:
|
||||
- The name of the artifact source.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myArtifactSource
|
||||
display_name:
|
||||
description:
|
||||
- The artifact source's display name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Public Artifact Repo
|
||||
source_type:
|
||||
description:
|
||||
- The artifact source's type.
|
||||
returned: always
|
||||
type: str
|
||||
sample: github
|
||||
is_enabled:
|
||||
description:
|
||||
- Whether the artifact source is enabled.
|
||||
returned: always
|
||||
type: bool
sample: true
|
||||
uri:
|
||||
description:
|
||||
- URI of the artifact source.
|
||||
returned: always
|
||||
type: str
|
||||
sample: https://github.com/Azure/azure-devtestlab.git
|
||||
folder_path:
|
||||
description:
|
||||
- The folder containing artifacts.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /Artifacts
|
||||
arm_template_folder_path:
|
||||
description:
|
||||
- The folder containing Azure Resource Manager templates.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /Environments
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of artifact source.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
tags:
|
||||
description:
|
||||
- The tags of the resource.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: "{ 'MyTag': 'MyValue' }"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDtlArtifactSourceInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
super(AzureRMDtlArtifactSourceInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlabartifactsource_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlabartifactsource_facts' module has been renamed to 'azure_rm_devtestlabartifactsource_info'",
|
||||
version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name:
|
||||
self.results['artifactsources'] = self.get()
|
||||
else:
|
||||
self.results['artifactsources'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.artifact_sources.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for Artifact Source.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.artifact_sources.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for Artifact Source.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'id': d.get('id'),
|
||||
'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
|
||||
'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
|
||||
'name': d.get('name'),
|
||||
'display_name': d.get('display_name'),
|
||||
'tags': d.get('tags'),
|
||||
'source_type': d.get('source_type').lower(),
|
||||
'is_enabled': d.get('status') == 'Enabled',
|
||||
'uri': d.get('uri'),
|
||||
'arm_template_folder_path': d.get('arm_template_folder_path'),
|
||||
'folder_path': d.get('folder_path'),
|
||||
'provisioning_state': d.get('provisioning_state')
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDtlArtifactSourceInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,383 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabcustomimage
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure DevTest Lab Custom Image instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure DevTest Lab Custom Image.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the custom image.
|
||||
required: True
|
||||
source_vm:
|
||||
description:
|
||||
- Source DevTest Lab virtual machine name.
|
||||
windows_os_state:
|
||||
description:
|
||||
- The state of the Windows OS.
|
||||
choices:
|
||||
- 'non_sysprepped'
|
||||
- 'sysprep_requested'
|
||||
- 'sysprep_applied'
|
||||
linux_os_state:
|
||||
description:
|
||||
- The state of the Linux OS.
|
||||
choices:
|
||||
- 'non_deprovisioned'
|
||||
- 'deprovision_requested'
|
||||
- 'deprovision_applied'
|
||||
description:
|
||||
description:
|
||||
- The description of the custom image.
|
||||
author:
|
||||
description:
|
||||
- The author of the custom image.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Custom Image.
|
||||
- Use C(present) to create or update a Custom Image and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create instance of DevTest Lab Image
|
||||
azure_rm_devtestlabcustomimage:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
name: myImage
|
||||
source_vm: myDevTestLabVm
|
||||
linux_os_state: non_deprovisioned
|
||||
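# Additional sketch (assumed VM name): capturing a Windows lab VM instead; exactly one of
# windows_os_state or linux_os_state should be given, matching the source VM's operating system.
- name: Create instance of DevTest Lab Image from a Windows VM
  azure_rm_devtestlabcustomimage:
    resource_group: myResourceGroup
    lab_name: myLab
    name: myWindowsImage
    source_vm: myWindowsLabVm
    windows_os_state: sysprep_applied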
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/images/myImage"
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMDtlCustomImage(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Custom Image resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
source_vm=dict(
|
||||
type='str'
|
||||
),
|
||||
windows_os_state=dict(
|
||||
type='str',
|
||||
choices=['non_sysprepped',
|
||||
'sysprep_requested',
|
||||
'sysprep_applied']
|
||||
),
|
||||
linux_os_state=dict(
|
||||
type='str',
|
||||
choices=['non_deprovisioned',
|
||||
'deprovision_requested',
|
||||
'deprovision_applied']
|
||||
),
|
||||
description=dict(
|
||||
type='str'
|
||||
),
|
||||
author=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.custom_image = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
required_if = [
|
||||
('state', 'present', [
|
||||
'source_vm'])
|
||||
]
|
||||
|
||||
super(AzureRMDtlCustomImage, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.custom_image[key] = kwargs[key]
|
||||
|
||||
if self.state == 'present':
|
||||
windows_os_state = self.custom_image.pop('windows_os_state', False)
|
||||
linux_os_state = self.custom_image.pop('linux_os_state', False)
|
||||
source_vm_name = self.custom_image.pop('source_vm')
|
||||
temp = "/subscriptions/{0}/resourcegroups/{1}/providers/microsoft.devtestlab/labs/{2}/virtualmachines/{3}"
|
||||
self.custom_image['vm'] = {}
|
||||
self.custom_image['vm']['source_vm_id'] = temp.format(self.subscription_id, self.resource_group, self.lab_name, source_vm_name)
|
||||
if windows_os_state:
|
||||
self.custom_image['vm']['windows_os_info'] = {'windows_os_state': _snake_to_camel(windows_os_state, True)}
|
||||
elif linux_os_state:
|
||||
self.custom_image['vm']['linux_os_info'] = {'linux_os_state': _snake_to_camel(linux_os_state, True)}
|
||||
else:
|
||||
self.fail("Either 'linux_os_state' or 'linux_os_state' must be specified")
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
old_response = self.get_customimage()
|
||||
|
||||
if not old_response:
|
||||
self.log("Custom Image instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Custom Image instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
if (not default_compare(self.custom_image, old_response, '', self.results)):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Custom Image instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_customimage()
|
||||
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Custom Image instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_customimage()
|
||||
# This currently doesn't work as there is a bug in the SDK / service
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
else:
|
||||
self.log("Custom Image instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None)
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_customimage(self):
|
||||
'''
|
||||
Creates or updates Custom Image with the specified configuration.
|
||||
|
||||
:return: deserialized Custom Image instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Custom Image instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.custom_images.create_or_update(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name,
|
||||
custom_image=self.custom_image)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Custom Image instance.')
|
||||
self.fail("Error creating the Custom Image instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_customimage(self):
|
||||
'''
|
||||
Deletes specified Custom Image instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Custom Image instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.custom_images.delete(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Custom Image instance.')
|
||||
self.fail("Error deleting the Custom Image instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_customimage(self):
|
||||
'''
|
||||
Gets the properties of the specified Custom Image.
|
||||
|
||||
:return: deserialized Custom Image instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Custom Image instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.custom_images.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Custom Image instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Custom Image instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def default_compare(new, old, path, result):
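    # Idempotency helper: returns True when 'old' (the current state returned by
    # Azure) already satisfies 'new' (the requested state). Keys missing from
    # 'new' are ignored, lists are sorted (by 'id' or 'name' when their items are
    # dicts) before an element-wise compare, and '/location' values are compared
    # with spaces stripped and case folded. On a mismatch the offending path is
    # recorded in result['compare'] and False is returned.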
|
||||
if new is None:
|
||||
return True
|
||||
elif isinstance(new, dict):
|
||||
if not isinstance(old, dict):
|
||||
result['compare'] = 'changed [' + path + '] old dict is null'
|
||||
return False
|
||||
for k in new.keys():
|
||||
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
|
||||
return False
|
||||
return True
|
||||
elif isinstance(new, list):
|
||||
if not isinstance(old, list) or len(new) != len(old):
|
||||
result['compare'] = 'changed [' + path + '] length is different or null'
|
||||
return False
|
||||
if isinstance(old[0], dict):
|
||||
key = None
|
||||
if 'id' in old[0] and 'id' in new[0]:
|
||||
key = 'id'
|
||||
elif 'name' in old[0] and 'name' in new[0]:
|
||||
key = 'name'
|
||||
else:
|
||||
key = list(old[0])[0]
|
||||
new = sorted(new, key=lambda x: x.get(key, None))
|
||||
old = sorted(old, key=lambda x: x.get(key, None))
|
||||
else:
|
||||
new = sorted(new)
|
||||
old = sorted(old)
|
||||
for i in range(len(new)):
|
||||
if not default_compare(new[i], old[i], path + '/*', result):
|
||||
return False
|
||||
return True
|
||||
else:
|
||||
if path == '/location':
|
||||
new = new.replace(' ', '').lower()
|
||||
            old = old.replace(' ', '').lower()
|
||||
if new == old:
|
||||
return True
|
||||
else:
|
||||
result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
|
||||
return False
|
||||
|
||||
|
||||
def dict_camelize(d, path, camelize_first):
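    # Walks 'd' along the given key path (recursing into lists) and converts the
    # snake_case value found at the end of the path to camelCase, capitalising
    # the first letter when camelize_first is True.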
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_camelize(d[i], path, camelize_first)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.get(path[0], None)
|
||||
if old_value is not None:
|
||||
d[path[0]] = _snake_to_camel(old_value, camelize_first)
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_camelize(sd, path[1:], camelize_first)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMDtlCustomImage()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,229 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabcustomimage_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure DevTest Lab Custom Image facts
|
||||
description:
|
||||
    - Get facts of Azure DevTest Lab Custom Image.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the custom image.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of Custom Image
|
||||
azure_rm_devtestlabcustomimage_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
name: myImage
|
||||
|
||||
- name: List instances of Custom Image in the lab
|
||||
azure_rm_devtestlabcustomimage_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
custom_images:
|
||||
description:
|
||||
- A list of dictionaries containing facts for Custom Image.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
                - The identifier of the custom image.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/cu
|
||||
stomimages/myImage"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- Name of the lab.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab
|
||||
name:
|
||||
description:
|
||||
- The name of the image.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myImage
|
||||
        managed_snapshot_id:
|
||||
description:
|
||||
- Managed snapshot id.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.compute/snapshots/myImage"
|
||||
source_vm_id:
|
||||
description:
|
||||
- Source VM id.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx//resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/v
|
||||
irtualmachines/myLabVm"
|
||||
tags:
|
||||
description:
|
||||
- The tags of the resource.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: "{ 'MyTag':'MyValue' }"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDtlCustomImageInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
                type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
super(AzureRMDtlCustomImageInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlabcustomimage_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlabcustomimage_facts' module has been renamed to 'azure_rm_devtestlabcustomimage_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name:
|
||||
self.results['custom_images'] = self.get()
|
||||
else:
|
||||
self.results['custom_images'] = self.list()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.custom_images.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Custom Image.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.custom_images.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Custom Image.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'lab_name': self.lab_name,
|
||||
'name': d.get('name'),
|
||||
'id': d.get('id'),
|
||||
'managed_snapshot_id': d.get('managed_snapshot_id'),
|
||||
'source_vm_id': d.get('vm', {}).get('source_vm_id'),
|
||||
'tags': d.get('tags')
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDtlCustomImageInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,379 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabenvironment
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure DevTest Lab Environment instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure DevTest Lab Environment.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
user_name:
|
||||
description:
|
||||
- The name of the user profile.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the environment.
|
||||
required: True
|
||||
location:
|
||||
description:
|
||||
- The location of the resource.
|
||||
deployment_template:
|
||||
description:
|
||||
- The Azure Resource Manager template's identifier.
|
||||
deployment_parameters:
|
||||
description:
|
||||
- The parameters of the Azure Resource Manager template.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the template parameter.
|
||||
value:
|
||||
description:
|
||||
- The value of the template parameter.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Environment.
|
||||
            - Use C(present) to create or update an Environment and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create instance of DevTest Lab Environment from public environment repo
|
||||
azure_rm_devtestlabenvironment:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
user_name: user
|
||||
name: myEnvironment
|
||||
location: eastus
|
||||
deployment_template:
|
||||
artifact_source_name: public environment repo
|
||||
name: WebApp
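
# A hedged, illustrative variant (not part of the original examples): the module
# also accepts the full ARM template resource ID as a plain string, as the
# deployment_template handling in exec_module below suggests. The IDs shown are
# placeholders.
- name: Create instance of DevTest Lab Environment from a full ARM template ID
  azure_rm_devtestlabenvironment:
    resource_group: myResourceGroup
    lab_name: myLab
    user_name: user
    name: myEnvironment
    location: eastus
    deployment_template: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/artifactSources/public environment repo/armTemplates/WebApp"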
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/environment
|
||||
s/myEnvironment"
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMDtlEnvironment(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Environment resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
user_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
deployment_template=dict(
|
||||
type='raw'
|
||||
),
|
||||
deployment_parameters=dict(
|
||||
type='list',
|
||||
options=dict(
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
value=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.user_name = None
|
||||
self.name = None
|
||||
self.dtl_environment = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMDtlEnvironment, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.dtl_environment[key] = kwargs[key]
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
deployment_template = self.dtl_environment.pop('deployment_template', None)
|
||||
if deployment_template:
|
||||
if isinstance(deployment_template, dict):
|
||||
if all(key in deployment_template for key in ('artifact_source_name', 'name')):
|
||||
tmp = '/subscriptions/{0}/resourcegroups/{1}/providers/microsoft.devtestlab/labs/{2}/artifactSources/{3}/armTemplates/{4}'
|
||||
deployment_template = tmp.format(self.subscription_id,
|
||||
self.resource_group,
|
||||
self.lab_name,
|
||||
deployment_template['artifact_source_name'],
|
||||
deployment_template['name'])
|
||||
if not isinstance(deployment_template, str):
|
||||
self.fail("parameter error: expecting deployment_template to contain [artifact_source, name]")
|
||||
self.dtl_environment['deployment_properties'] = {}
|
||||
self.dtl_environment['deployment_properties']['arm_template_id'] = deployment_template
|
||||
self.dtl_environment['deployment_properties']['parameters'] = self.dtl_environment.pop('deployment_parameters', None)
|
||||
|
||||
old_response = self.get_environment()
|
||||
|
||||
if not old_response:
|
||||
self.log("Environment instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Environment instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
if (not default_compare(self.dtl_environment, old_response, '', self.results)):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Environment instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_environment()
|
||||
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Environment instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_environment()
|
||||
# This currently doesn't work as there is a bug in SDK / Service
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
else:
|
||||
self.log("Environment instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None)
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_environment(self):
|
||||
'''
|
||||
Creates or updates Environment with the specified configuration.
|
||||
|
||||
:return: deserialized Environment instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Environment instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
if self.to_do == Actions.Create:
|
||||
response = self.mgmt_client.environments.create_or_update(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
user_name=self.user_name,
|
||||
name=self.name,
|
||||
dtl_environment=self.dtl_environment)
|
||||
else:
|
||||
response = self.mgmt_client.environments.update(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
user_name=self.user_name,
|
||||
name=self.name,
|
||||
dtl_environment=self.dtl_environment)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Environment instance.')
|
||||
self.fail("Error creating the Environment instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_environment(self):
|
||||
'''
|
||||
Deletes specified Environment instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Environment instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.environments.delete(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
user_name=self.user_name,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Environment instance.')
|
||||
self.fail("Error deleting the Environment instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_environment(self):
|
||||
'''
|
||||
Gets the properties of the specified Environment.
|
||||
|
||||
:return: deserialized Environment instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Environment instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.environments.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
user_name=self.user_name,
|
||||
name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Environment instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Environment instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def default_compare(new, old, path, result):
|
||||
if new is None:
|
||||
return True
|
||||
elif isinstance(new, dict):
|
||||
if not isinstance(old, dict):
|
||||
result['compare'] = 'changed [' + path + '] old dict is null'
|
||||
return False
|
||||
for k in new.keys():
|
||||
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
|
||||
return False
|
||||
return True
|
||||
elif isinstance(new, list):
|
||||
if not isinstance(old, list) or len(new) != len(old):
|
||||
result['compare'] = 'changed [' + path + '] length is different or null'
|
||||
return False
|
||||
if isinstance(old[0], dict):
|
||||
key = None
|
||||
if 'id' in old[0] and 'id' in new[0]:
|
||||
key = 'id'
|
||||
elif 'name' in old[0] and 'name' in new[0]:
|
||||
key = 'name'
|
||||
else:
|
||||
key = list(old[0])[0]
|
||||
new = sorted(new, key=lambda x: x.get(key, None))
|
||||
old = sorted(old, key=lambda x: x.get(key, None))
|
||||
else:
|
||||
new = sorted(new)
|
||||
old = sorted(old)
|
||||
for i in range(len(new)):
|
||||
if not default_compare(new[i], old[i], path + '/*', result):
|
||||
return False
|
||||
return True
|
||||
else:
|
||||
if path == '/location':
|
||||
new = new.replace(' ', '').lower()
|
||||
            old = old.replace(' ', '').lower()
|
||||
if new == old:
|
||||
return True
|
||||
else:
|
||||
result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMDtlEnvironment()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,245 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabenvironment_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Environment facts
|
||||
description:
|
||||
- Get facts of Azure Environment.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
type: str
|
||||
user_name:
|
||||
description:
|
||||
- The name of the user profile.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the environment.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of Environment
|
||||
azure_rm_devtestlabenvironment_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
user_name: myUser
|
||||
name: myEnvironment
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
environments:
|
||||
description:
|
||||
- A list of dictionaries containing facts for Environment.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
                - The identifier of the environment.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/sc
|
||||
hedules/xxxxxxxx-xxxx-xxxx-xxxxx-xxxxxxxxxxxxx/environments/myEnvironment"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- Name of the lab.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab
|
||||
name:
|
||||
description:
|
||||
- The name of the environment.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myEnvironment
|
||||
deployment_template:
|
||||
description:
|
||||
                - The identifier of the Azure Resource Manager template used by the environment.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/mylab/art
|
||||
ifactSources/public environment repo/armTemplates/WebApp"
|
||||
resource_group_id:
|
||||
description:
|
||||
- Target resource group id.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myLab-myEnvironment-982571"
|
||||
        provisioning_state:
|
||||
description:
|
||||
- Deployment state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
tags:
|
||||
description:
|
||||
- The tags of the resource.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: "{ 'MyTag': 'MyValue' }"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDtlEnvironmentInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
user_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.user_name = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
super(AzureRMDtlEnvironmentInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlabenvironment_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlabenvironment_facts' module has been renamed to 'azure_rm_devtestlabenvironment_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name:
|
||||
self.results['environments'] = self.get()
|
||||
else:
|
||||
self.results['environments'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.environments.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
user_name=self.user_name,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Environment.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.environments.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
user_name=self.user_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Environment.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'lab_name': self.lab_name,
|
||||
'name': d.get('name'),
|
||||
'user_name': self.user_name,
|
||||
'id': d.get('id', None),
|
||||
'deployment_template': d.get('deployment_properties', {}).get('arm_template_id'),
|
||||
'location': d.get('location'),
|
||||
'provisioning_state': d.get('provisioning_state'),
|
||||
'resource_group_id': d.get('resource_group_id'),
|
||||
'tags': d.get('tags', None)
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDtlEnvironmentInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,401 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabpolicy
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure Policy instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure Policy.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
policy_set_name:
|
||||
description:
|
||||
- The name of the policy set.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the policy.
|
||||
required: True
|
||||
description:
|
||||
description:
|
||||
- The description of the policy.
|
||||
fact_name:
|
||||
description:
|
||||
            - The fact name of the policy (e.g. C(lab_vm_count), C(lab_vm_size), C(lab_target_cost)).
|
||||
choices:
|
||||
- 'user_owned_lab_vm_count'
|
||||
- 'user_owned_lab_premium_vm_count'
|
||||
- 'lab_vm_count'
|
||||
- 'lab_premium_vm_count'
|
||||
- 'lab_vm_size'
|
||||
- 'gallery_image'
|
||||
- 'user_owned_lab_vm_count_in_subnet'
|
||||
- 'lab_target_cost'
|
||||
threshold:
|
||||
description:
|
||||
- The threshold of the policy (it could be either a maximum value or a list of allowed values).
|
||||
type: raw
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Policy.
|
||||
            - Use C(present) to create or update a Policy and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create DevTest Lab Policy
|
||||
azure_rm_devtestlabpolicy:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
policy_set_name: myPolicySet
|
||||
name: myPolicy
|
||||
fact_name: user_owned_lab_vm_count
|
||||
threshold: 5
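
# A hedged, illustrative second example (not part of the original docs): when
# threshold is given as a list, exec_module below switches evaluator_type to
# AllowedValuesPolicy instead of MaxValuePolicy. The policy name and VM sizes
# shown here are placeholders.
- name: Create DevTest Lab Policy restricting allowed VM sizes
  azure_rm_devtestlabpolicy:
    resource_group: myResourceGroup
    lab_name: myLab
    policy_set_name: myPolicySet
    name: myVmSizePolicy
    fact_name: lab_vm_size
    threshold:
      - Standard_DS1_v2
      - Standard_DS2_v2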
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/policySets/
|
||||
myPolicySet/policies/myPolicy"
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMDtlPolicy(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Policy resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
policy_set_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
description=dict(
|
||||
type='str'
|
||||
),
|
||||
fact_name=dict(
|
||||
type='str',
|
||||
choices=['user_owned_lab_vm_count',
|
||||
'user_owned_lab_premium_vm_count',
|
||||
'lab_vm_count',
|
||||
'lab_premium_vm_count',
|
||||
'lab_vm_size',
|
||||
'gallery_image',
|
||||
'user_owned_lab_vm_count_in_subnet',
|
||||
'lab_target_cost']
|
||||
),
|
||||
threshold=dict(
|
||||
type='raw'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.policy_set_name = None
|
||||
self.name = None
|
||||
self.policy = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['threshold', 'fact_name'])
|
||||
]
|
||||
|
||||
super(AzureRMDtlPolicy, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.policy[key] = kwargs[key]
|
||||
|
||||
if self.state == 'present':
|
||||
self.policy['status'] = 'Enabled'
|
||||
dict_camelize(self.policy, ['fact_name'], True)
|
||||
if isinstance(self.policy['threshold'], list):
|
||||
self.policy['evaluator_type'] = 'AllowedValuesPolicy'
|
||||
else:
|
||||
self.policy['evaluator_type'] = 'MaxValuePolicy'
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
old_response = self.get_policy()
|
||||
|
||||
if not old_response:
|
||||
self.log("Policy instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Policy instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
if (not default_compare(self.policy, old_response, '', self.results)):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Policy instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_policy()
|
||||
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Policy instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_policy()
|
||||
            # This currently doesn't work as there is a bug in SDK / Service
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
else:
|
||||
self.log("Policy instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None),
|
||||
'status': response.get('status', None)
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_policy(self):
|
||||
'''
|
||||
Creates or updates Policy with the specified configuration.
|
||||
|
||||
:return: deserialized Policy instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Policy instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.policies.create_or_update(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
policy_set_name=self.policy_set_name,
|
||||
name=self.name,
|
||||
policy=self.policy)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Policy instance.')
|
||||
self.fail("Error creating the Policy instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_policy(self):
|
||||
'''
|
||||
Deletes specified Policy instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Policy instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.policies.delete(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
policy_set_name=self.policy_set_name,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Policy instance.')
|
||||
self.fail("Error deleting the Policy instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_policy(self):
|
||||
'''
|
||||
Gets the properties of the specified Policy.
|
||||
|
||||
:return: deserialized Policy instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Policy instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.policies.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
policy_set_name=self.policy_set_name,
|
||||
name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Policy instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Policy instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def default_compare(new, old, path, result):
|
||||
if new is None:
|
||||
return True
|
||||
elif isinstance(new, dict):
|
||||
if not isinstance(old, dict):
|
||||
result['compare'] = 'changed [' + path + '] old dict is null'
|
||||
return False
|
||||
for k in new.keys():
|
||||
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
|
||||
return False
|
||||
return True
|
||||
elif isinstance(new, list):
|
||||
if not isinstance(old, list) or len(new) != len(old):
|
||||
result['compare'] = 'changed [' + path + '] length is different or null'
|
||||
return False
|
||||
if isinstance(old[0], dict):
|
||||
key = None
|
||||
if 'id' in old[0] and 'id' in new[0]:
|
||||
key = 'id'
|
||||
elif 'name' in old[0] and 'name' in new[0]:
|
||||
key = 'name'
|
||||
else:
|
||||
key = list(old[0])[0]
|
||||
new = sorted(new, key=lambda x: x.get(key, None))
|
||||
old = sorted(old, key=lambda x: x.get(key, None))
|
||||
else:
|
||||
new = sorted(new)
|
||||
old = sorted(old)
|
||||
for i in range(len(new)):
|
||||
if not default_compare(new[i], old[i], path + '/*', result):
|
||||
return False
|
||||
return True
|
||||
else:
|
||||
if path == '/location':
|
||||
new = new.replace(' ', '').lower()
|
||||
            old = old.replace(' ', '').lower()
|
||||
if str(new) == str(old):
|
||||
return True
|
||||
else:
|
||||
result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
|
||||
return False
|
||||
|
||||
|
||||
def dict_camelize(d, path, camelize_first):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_camelize(d[i], path, camelize_first)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.get(path[0], None)
|
||||
if old_value is not None:
|
||||
d[path[0]] = _snake_to_camel(old_value, camelize_first)
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_camelize(sd, path[1:], camelize_first)
|
||||
|
||||
|
||||
def dict_map(d, path, map):
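    # Walks 'd' along the given key path (recursing into lists) and, at the end
    # of the path, replaces the value with map[value] when the value is a key of
    # 'map'; values without a mapping are left unchanged.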
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_map(d[i], path, map)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.get(path[0], None)
|
||||
if old_value is not None:
|
||||
d[path[0]] = map.get(old_value, old_value)
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_map(sd, path[1:], map)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMDtlPolicy()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,243 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabpolicy_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure DTL Policy facts
|
||||
description:
|
||||
- Get facts of Azure DTL Policy.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
type: str
|
||||
policy_set_name:
|
||||
description:
|
||||
- The name of the policy set.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the policy.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of Policy
|
||||
azure_rm_devtestlabpolicy_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
policy_set_name: myPolicySet
|
||||
name: myPolicy
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
policies:
|
||||
description:
|
||||
- A list of dictionaries containing facts for Policy.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
                - The identifier of the policy.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/po
|
||||
licysets/myPolicySet/policies/myPolicy"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- Name of the lab.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab
|
||||
name:
|
||||
description:
|
||||
                - The name of the policy.
|
||||
returned: always
|
||||
type: str
|
||||
            sample: myPolicy
|
||||
fact_name:
|
||||
description:
|
||||
- The name of the policy fact.
|
||||
returned: always
|
||||
type: str
|
||||
sample: UserOwnedLabVmCount
|
||||
evaluator_type:
|
||||
description:
|
||||
- Evaluator type for policy fact.
|
||||
returned: always
|
||||
type: str
|
||||
sample: MaxValuePolicy
|
||||
threshold:
|
||||
description:
|
||||
- Fact's threshold.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 5
|
||||
tags:
|
||||
description:
|
||||
- The tags of the resource.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: "{ 'MyTag': 'MyValue' }"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDtlPolicyInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
policy_set_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.policy_set_name = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
super(AzureRMDtlPolicyInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlabpolicy_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlabpolicy_facts' module has been renamed to 'azure_rm_devtestlabpolicy_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name:
|
||||
self.results['policies'] = self.get()
|
||||
else:
|
||||
self.results['policies'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.policies.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
policy_set_name=self.policy_set_name,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Policy.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.policies.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
policy_set_name=self.policy_set_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Policy.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'policy_set_name': self.policy_set_name,
|
||||
'name': d.get('name'),
|
||||
'id': d.get('id'),
|
||||
'tags': d.get('tags'),
|
||||
'status': d.get('status'),
|
||||
'threshold': d.get('threshold'),
|
||||
'fact_name': d.get('fact_name'),
|
||||
'evaluator_type': d.get('evaluator_type')
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDtlPolicyInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,341 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabschedule
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure DevTest Lab Schedule instance
|
||||
description:
|
||||
    - Create, update and delete instance of Azure DevTest Lab Schedule.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the schedule.
|
||||
required: True
|
||||
choices:
|
||||
- lab_vms_startup
|
||||
- lab_vms_shutdown
|
||||
time:
|
||||
description:
|
||||
- The time of day the schedule will occur.
|
||||
time_zone_id:
|
||||
description:
|
||||
- The time zone ID.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Schedule.
|
||||
            - Use C(present) to create or update a Schedule and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create (or update) DevTest Lab Schedule
|
||||
azure_rm_devtestlabschedule:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
name: lab_vms_shutdown
|
||||
time: "1030"
|
||||
time_zone_id: "UTC+12"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/schedules/l
|
||||
abVmsShutdown"
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMSchedule(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Schedule resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True,
|
||||
choices=['lab_vms_startup', 'lab_vms_shutdown']
|
||||
),
|
||||
time=dict(
|
||||
type='str'
|
||||
),
|
||||
time_zone_id=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.schedule = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['time', 'time_zone_id'])
|
||||
]
|
||||
|
||||
super(AzureRMSchedule, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.schedule[key] = kwargs[key]
|
||||
|
||||
self.schedule['status'] = "Enabled"
|
||||
|
||||
if self.name == 'lab_vms_startup':
|
||||
self.name = 'LabVmsStartup'
|
||||
self.schedule['task_type'] = 'LabVmsStartupTask'
|
||||
elif self.name == 'lab_vms_shutdown':
|
||||
self.name = 'LabVmsShutdown'
|
||||
self.schedule['task_type'] = 'LabVmsShutdownTask'
|
||||
|
||||
if self.state == 'present':
|
||||
self.schedule['daily_recurrence'] = {'time': self.schedule.pop('time')}
|
||||
self.schedule['time_zone_id'] = self.schedule['time_zone_id'].upper()
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
old_response = self.get_schedule()
|
||||
|
||||
if not old_response:
|
||||
self.log("Schedule instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Schedule instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
if (not default_compare(self.schedule, old_response, '', self.results)):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Schedule instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_schedule()
|
||||
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Schedule instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_schedule()
|
||||
# This currently doesn't work as there is a bug in SDK / Service
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
else:
|
||||
self.log("Schedule instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None)
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_schedule(self):
|
||||
'''
|
||||
Creates or updates Schedule with the specified configuration.
|
||||
|
||||
:return: deserialized Schedule instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Schedule instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.schedules.create_or_update(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name,
|
||||
schedule=self.schedule)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Schedule instance.')
|
||||
self.fail("Error creating the Schedule instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_schedule(self):
|
||||
'''
|
||||
Deletes specified Schedule instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Schedule instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.schedules.delete(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Schedule instance.')
|
||||
self.fail("Error deleting the Schedule instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_schedule(self):
|
||||
'''
|
||||
Gets the properties of the specified Schedule.
|
||||
|
||||
:return: deserialized Schedule instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Schedule instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.schedules.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Schedule instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Schedule instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def default_compare(new, old, path, result):
|
||||
if new is None:
|
||||
return True
|
||||
elif isinstance(new, dict):
|
||||
if not isinstance(old, dict):
|
||||
result['compare'] = 'changed [' + path + '] old dict is null'
|
||||
return False
|
||||
for k in new.keys():
|
||||
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
|
||||
return False
|
||||
return True
|
||||
elif isinstance(new, list):
|
||||
if not isinstance(old, list) or len(new) != len(old):
|
||||
result['compare'] = 'changed [' + path + '] length is different or null'
|
||||
return False
|
||||
if isinstance(old[0], dict):
|
||||
key = None
|
||||
if 'id' in old[0] and 'id' in new[0]:
|
||||
key = 'id'
|
||||
elif 'name' in old[0] and 'name' in new[0]:
|
||||
key = 'name'
|
||||
else:
|
||||
key = list(old[0])[0]
|
||||
new = sorted(new, key=lambda x: x.get(key, None))
|
||||
old = sorted(old, key=lambda x: x.get(key, None))
|
||||
else:
|
||||
new = sorted(new)
|
||||
old = sorted(old)
|
||||
for i in range(len(new)):
|
||||
if not default_compare(new[i], old[i], path + '/*', result):
|
||||
return False
|
||||
return True
|
||||
else:
|
||||
if path == '/location':
    new = new.replace(' ', '').lower()
    old = old.replace(' ', '').lower()
|
||||
if new == old:
|
||||
return True
|
||||
else:
|
||||
result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
|
||||
return False
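# A minimal illustration of how default_compare reports drift (hypothetical values, not from the source):
# default_compare({'status': 'Enabled'}, {'status': 'Disabled'}, '', results) walks into the 'status' key,
# hits the scalar branch, records results['compare'] = "changed [/status] Enabled != Disabled" and returns
# False, which exec_module treats as "an Update is required".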
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMSchedule()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,222 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabschedule_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Schedule facts
|
||||
description:
|
||||
- Get facts of Azure Schedule.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the schedule.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of Schedule
|
||||
azure_rm_devtestlabschedule_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
name: mySchedule
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
schedules:
|
||||
description:
|
||||
- A list of dictionaries containing facts for Schedule.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The identifier of the schedule.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/schedules/labvmsshutdown"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- Name of the lab.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab
|
||||
name:
|
||||
description:
|
||||
- The name of the schedule.
|
||||
returned: always
|
||||
type: str
|
||||
sample: lab_vms_shutdown
|
||||
time:
|
||||
description:
|
||||
- Time of the schedule.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "1900"
|
||||
time_zone_id:
|
||||
description:
|
||||
- Time zone id.
|
||||
returned: always
|
||||
type: str
|
||||
sample: UTC+12
|
||||
tags:
|
||||
description:
|
||||
- The tags of the resource.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: "{ 'MyTag': 'MyValue' }"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDtlScheduleInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
super(AzureRMDtlScheduleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlabschedule_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlabschedule_facts' module has been renamed to 'azure_rm_devtestlabschedule_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
if self.name:
|
||||
self.results['schedules'] = self.get()
|
||||
else:
|
||||
self.results['schedules'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.schedules.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=_snake_to_camel(self.name))
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Schedule.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.schedules.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Schedule.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'lab_name': self.lab_name,
|
||||
'name': _camel_to_snake(d.get('name')),
|
||||
'id': d.get('id', None),
|
||||
'tags': d.get('tags', None),
|
||||
'time': d.get('daily_recurrence', {}).get('time'),
|
||||
'time_zone_id': d.get('time_zone_id')
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDtlScheduleInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,544 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabvirtualmachine
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure DevTest Lab Virtual Machine instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure DevTest Lab Virtual Machine.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the virtual machine.
|
||||
required: True
|
||||
notes:
|
||||
description:
|
||||
- The notes of the virtual machine.
|
||||
os_type:
|
||||
description:
|
||||
- Base type of operating system.
|
||||
choices:
|
||||
- windows
|
||||
- linux
|
||||
vm_size:
|
||||
description:
|
||||
- A valid Azure VM size value. For example, C(Standard_D4).
|
||||
- The list of choices varies depending on the subscription and location. Check your subscription for available choices.
|
||||
- Available values can be found at U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-general).
|
||||
- Required when I(state=present).
|
||||
user_name:
|
||||
description:
|
||||
- The user name of the virtual machine.
|
||||
password:
|
||||
description:
|
||||
- The password of the virtual machine administrator.
|
||||
ssh_key:
|
||||
description:
|
||||
- The SSH key of the virtual machine administrator.
|
||||
lab_subnet:
|
||||
description:
|
||||
- An existing subnet within lab's virtual network.
|
||||
- It can be the subnet's resource id.
|
||||
- It can be a dict which contains C(virtual_network_name) and C(name).
|
||||
disallow_public_ip_address:
|
||||
description:
|
||||
- Indicates whether the virtual machine is to be created without a public IP address.
|
||||
artifacts:
|
||||
description:
|
||||
- The artifacts to be installed on the virtual machine.
|
||||
type: list
|
||||
suboptions:
|
||||
source_name:
|
||||
description:
|
||||
- The artifact's source name.
|
||||
source_path:
|
||||
description:
|
||||
- The artifact's path in the source repository.
|
||||
parameters:
|
||||
description:
|
||||
- The parameters of the artifact.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the artifact parameter.
|
||||
value:
|
||||
description:
|
||||
- The value of the artifact parameter.
|
||||
image:
|
||||
description:
|
||||
- The Microsoft Azure Marketplace image reference of the virtual machine.
|
||||
suboptions:
|
||||
offer:
|
||||
description:
|
||||
- The offer of the gallery image.
|
||||
publisher:
|
||||
description:
|
||||
- The publisher of the gallery image.
|
||||
sku:
|
||||
description:
|
||||
- The SKU of the gallery image.
|
||||
os_type:
|
||||
description:
|
||||
- The OS type of the gallery image.
|
||||
version:
|
||||
description:
|
||||
- The version of the gallery image.
|
||||
expiration_date:
|
||||
description:
|
||||
- The expiration date for VM.
|
||||
allow_claim:
|
||||
description:
|
||||
- Indicates whether another user can take ownership of the virtual machine.
|
||||
storage_type:
|
||||
description:
|
||||
- Storage type to use for virtual machine.
|
||||
choices:
|
||||
- standard
|
||||
- premium
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Virtual Machine.
|
||||
- Use C(present) to create or update a Virtual Machine and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create (or update) Virtual Machine
|
||||
azure_rm_devtestlabvirtualmachine:
|
||||
resource_group: myrg
|
||||
lab_name: mylab
|
||||
name: myvm
|
||||
notes: Virtual machine notes....
|
||||
os_type: linux
|
||||
vm_size: Standard_A2_v2
|
||||
user_name: vmadmin
|
||||
password: ZSuppas$$21!
|
||||
lab_subnet:
|
||||
name: myvnSubnet
|
||||
virtual_network_name: myvn
|
||||
disallow_public_ip_address: no
|
||||
image:
|
||||
offer: UbuntuServer
|
||||
publisher: Canonical
|
||||
sku: 16.04-LTS
|
||||
os_type: Linux
|
||||
version: latest
|
||||
artifacts:
|
||||
- source_name: myartifact
|
||||
source_path: "/Artifacts/linux-install-mongodb"
|
||||
allow_claim: no
|
||||
expiration_date: "2019-02-22T01:49:12.117974Z"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The identifier of the DTL Virtual Machine resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myrg/providers/microsoft.devtestlab/labs/mylab/virtualmachines/myvm
|
||||
compute_id:
|
||||
description:
|
||||
- The identifier of the underlying Compute Virtual Machine resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myrg/providers/microsoft.devtestlab/labs/mylab/virtualmachines/myvm
|
||||
fqdn:
|
||||
description:
|
||||
- Fully qualified domain name or IP Address of the virtual machine.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myvm.eastus.cloudapp.azure.com
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMVirtualMachine(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Virtual Machine resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
notes=dict(
|
||||
type='str'
|
||||
),
|
||||
os_type=dict(
|
||||
type='str',
|
||||
choices=['linux', 'windows']
|
||||
),
|
||||
vm_size=dict(
|
||||
type='str'
|
||||
),
|
||||
user_name=dict(
|
||||
type='str'
|
||||
),
|
||||
password=dict(
|
||||
type='str',
|
||||
no_log=True
|
||||
),
|
||||
ssh_key=dict(
|
||||
type='str',
|
||||
no_log=True
|
||||
),
|
||||
lab_subnet=dict(
|
||||
type='raw'
|
||||
),
|
||||
disallow_public_ip_address=dict(
|
||||
type='str'
|
||||
),
|
||||
artifacts=dict(
|
||||
type='list',
|
||||
options=dict(
|
||||
artifact_id=dict(
|
||||
type='str'
|
||||
),
|
||||
parameters=dict(
|
||||
type='list',
|
||||
options=dict(
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
value=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
),
|
||||
image=dict(
|
||||
type='dict',
|
||||
options=dict(
|
||||
offer=dict(
|
||||
type='str'
|
||||
),
|
||||
publisher=dict(
|
||||
type='str'
|
||||
),
|
||||
sku=dict(
|
||||
type='str'
|
||||
),
|
||||
os_type=dict(
|
||||
type='str'
|
||||
),
|
||||
version=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
),
|
||||
expiration_date=dict(
|
||||
type='str'
|
||||
),
|
||||
allow_claim=dict(
|
||||
type='str'
|
||||
),
|
||||
storage_type=dict(
|
||||
type='str',
|
||||
choices=['standard', 'premium']
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', [
|
||||
'image', 'lab_subnet', 'vm_size', 'os_type'])
|
||||
]
|
||||
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.lab_virtual_machine = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.lab_virtual_machine[key] = kwargs[key]
|
||||
|
||||
self.lab_virtual_machine['gallery_image_reference'] = self.lab_virtual_machine.pop('image', None)
|
||||
|
||||
if self.lab_virtual_machine.get('artifacts') is not None:
|
||||
for artifact in self.lab_virtual_machine.get('artifacts'):
|
||||
source_name = artifact.pop('source_name')
|
||||
source_path = artifact.pop('source_path')
|
||||
template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.DevTestLab/labs/{2}/artifactsources/{3}{4}"
|
||||
artifact['artifact_id'] = template.format(self.subscription_id, self.resource_group, self.lab_name, source_name, source_path)
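# Illustration with hypothetical values: source_name 'myartifact' and source_path '/Artifacts/linux-install-mongodb'
# yield an artifact_id of the form
# /subscriptions/<subscription>/resourceGroups/<rg>/providers/Microsoft.DevTestLab/labs/<lab>/artifactsources/myartifact/Artifacts/linux-install-mongodb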
|
||||
|
||||
self.lab_virtual_machine['size'] = self.lab_virtual_machine.pop('vm_size')
|
||||
self.lab_virtual_machine['os_type'] = _snake_to_camel(self.lab_virtual_machine['os_type'], True)
|
||||
|
||||
if self.lab_virtual_machine.get('storage_type'):
|
||||
self.lab_virtual_machine['storage_type'] = _snake_to_camel(self.lab_virtual_machine['storage_type'], True)
|
||||
|
||||
lab_subnet = self.lab_virtual_machine.pop('lab_subnet')
|
||||
|
||||
if isinstance(lab_subnet, str):
|
||||
vn_and_subnet = lab_subnet.split('/subnets/')
|
||||
if (len(vn_and_subnet) == 2):
|
||||
self.lab_virtual_machine['lab_virtual_network_id'] = vn_and_subnet[0]
|
||||
self.lab_virtual_machine['lab_subnet_name'] = vn_and_subnet[1]
|
||||
else:
|
||||
self.fail("Invalid 'lab_subnet' resource id format")
|
||||
else:
|
||||
template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.DevTestLab/labs/{2}/virtualnetworks/{3}"
|
||||
self.lab_virtual_machine['lab_virtual_network_id'] = template.format(self.subscription_id,
|
||||
self.resource_group,
|
||||
self.lab_name,
|
||||
lab_subnet.get('virtual_network_name'))
|
||||
self.lab_virtual_machine['lab_subnet_name'] = lab_subnet.get('name')
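# Both accepted forms of lab_subnet end up as the same two payload fields, for example (hypothetical values):
#   ".../labs/mylab/virtualnetworks/myvn/subnets/myvnSubnet"   -> split on '/subnets/'
#   {virtual_network_name: myvn, name: myvnSubnet}             -> id built from the template above
# either way the request carries lab_virtual_network_id and lab_subnet_name.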
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
old_response = self.get_virtualmachine()
|
||||
|
||||
if not old_response:
|
||||
self.log("Virtual Machine instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
# get location from the lab; the virtual machine must use the lab's location and the API requires it to be set explicitly
|
||||
lab = self.get_devtestlab()
|
||||
self.lab_virtual_machine['location'] = lab['location']
|
||||
else:
|
||||
self.log("Virtual Machine instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.lab_virtual_machine['location'] = old_response['location']
|
||||
|
||||
if old_response['size'].lower() != self.lab_virtual_machine.get('size').lower():
|
||||
self.lab_virtual_machine['size'] = old_response['size']
|
||||
self.module.warn("Property 'size' cannot be changed")
|
||||
|
||||
if self.lab_virtual_machine.get('storage_type') is not None and \
|
||||
old_response['storage_type'].lower() != self.lab_virtual_machine.get('storage_type').lower():
|
||||
self.lab_virtual_machine['storage_type'] = old_response['storage_type']
|
||||
self.module.warn("Property 'storage_type' cannot be changed")
|
||||
|
||||
if old_response.get('gallery_image_reference', {}) != self.lab_virtual_machine.get('gallery_image_reference', {}):
|
||||
self.lab_virtual_machine['gallery_image_reference'] = old_response['gallery_image_reference']
|
||||
self.module.warn("Property 'image' cannot be changed")
|
||||
|
||||
# currently artifacts can be only specified when vm is created
|
||||
# and in addition we don't have detailed information, just a number of "total artifacts"
|
||||
if len(self.lab_virtual_machine.get('artifacts', [])) != old_response['artifact_deployment_status']['total_artifacts']:
|
||||
self.module.warn("Property 'artifacts' cannot be changed")
|
||||
|
||||
if self.lab_virtual_machine.get('disallow_public_ip_address') is not None:
|
||||
if old_response['disallow_public_ip_address'] != self.lab_virtual_machine.get('disallow_public_ip_address'):
|
||||
self.module.warn("Property 'disallow_public_ip_address' cannot be changed")
|
||||
self.lab_virtual_machine['disallow_public_ip_address'] = old_response['disallow_public_ip_address']
|
||||
|
||||
if self.lab_virtual_machine.get('allow_claim') is not None:
|
||||
if old_response['allow_claim'] != self.lab_virtual_machine.get('allow_claim'):
|
||||
self.module.warn("Property 'allow_claim' cannot be changed")
|
||||
self.lab_virtual_machine['allow_claim'] = old_response['allow_claim']
|
||||
|
||||
if self.lab_virtual_machine.get('notes') is not None:
|
||||
if old_response['notes'] != self.lab_virtual_machine.get('notes'):
|
||||
self.to_do = Actions.Update
|
||||
else:
|
||||
self.lab_virtual_machine['notes'] = old_response['notes']
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Virtual Machine instance")
|
||||
|
||||
self.results['changed'] = True
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
response = self.create_update_virtualmachine()
|
||||
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Virtual Machine instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_virtualmachine()
|
||||
else:
|
||||
self.log("Virtual Machine instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None),
|
||||
'compute_id': response.get('compute_id', None),
|
||||
'fqdn': response.get('fqdn', None)
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_virtualmachine(self):
|
||||
'''
|
||||
Creates or updates Virtual Machine with the specified configuration.
|
||||
|
||||
:return: deserialized Virtual Machine instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Virtual Machine instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.virtual_machines.create_or_update(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name,
|
||||
lab_virtual_machine=self.lab_virtual_machine)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Virtual Machine instance.')
|
||||
self.fail("Error creating the Virtual Machine instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_virtualmachine(self):
|
||||
'''
|
||||
Deletes specified Virtual Machine instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Virtual Machine instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.virtual_machines.delete(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Virtual Machine instance.')
|
||||
self.fail("Error deleting the Virtual Machine instance: {0}".format(str(e)))
|
||||
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
return True
|
||||
|
||||
def get_virtualmachine(self):
|
||||
'''
|
||||
Gets the properties of the specified Virtual Machine.
|
||||
|
||||
:return: deserialized Virtual Machine instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Virtual Machine instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.virtual_machines.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Virtual Machine instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Virtual Machine instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
def get_devtestlab(self):
|
||||
'''
|
||||
Gets the properties of the specified DevTest Lab.
|
||||
|
||||
:return: deserialized DevTest Lab instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the DevTest Lab instance {0} is present".format(self.lab_name))
|
||||
try:
|
||||
response = self.mgmt_client.labs.get(resource_group_name=self.resource_group,
|
||||
name=self.lab_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("DevTest Lab instance : {0} found".format(response.name))
|
||||
return response.as_dict()
|
||||
except CloudError as e:
|
||||
self.fail('Did not find the DevTest Lab instance.')
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMVirtualMachine()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,329 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabvirtualmachine_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure DevTest Lab Virtual Machine facts
|
||||
description:
|
||||
- Get facts of Azure DevTest Lab Virtual Machine.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the virtual machine.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of DTL Virtual Machine
|
||||
azure_rm_devtestlabvirtualmachine_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
name: myVm
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
virtualmachines:
|
||||
description:
|
||||
- A list of dictionaries containing facts for DevTest Lab Virtual Machine.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The identifier of the virtual machine.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/virtualmachines/myVm"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- Name of the lab.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myVm
|
||||
notes:
|
||||
description:
|
||||
- Notes of the virtual machine.
|
||||
returned: always
|
||||
type: str
|
||||
sample: My VM notes
|
||||
disallow_public_ip_address:
|
||||
description:
|
||||
- Whether a public IP address is disallowed for the virtual machine.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: false
|
||||
expiration_date:
|
||||
description:
|
||||
- Virtual machine expiration date.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "2029-02-22T01:49:12.117974Z"
|
||||
image:
|
||||
description:
|
||||
- Gallery image reference.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
offer:
|
||||
description:
|
||||
- The offer of the gallery image.
|
||||
returned: when created from gallery image
|
||||
type: str
|
||||
sample: UbuntuServer
|
||||
os_type:
|
||||
description:
|
||||
- Operating system type.
|
||||
returned: when created from gallery image
|
||||
type: str
|
||||
sample: Linux
|
||||
sku:
|
||||
description:
|
||||
- The SKU of the gallery image.
|
||||
returned: when created from gallery image
|
||||
type: str
|
||||
sample: 16.04-LTS
|
||||
publisher:
|
||||
description:
|
||||
- The publisher of the gallery image.
|
||||
returned: when created from gallery image
|
||||
type: str
|
||||
sample: Canonical
|
||||
version:
|
||||
description:
|
||||
- The version of the gallery image.
|
||||
returned: when created from gallery image
|
||||
type: str
|
||||
sample: latest
|
||||
os_type:
|
||||
description:
|
||||
- Operating system type.
|
||||
returned: always
|
||||
type: str
|
||||
sample: linux
|
||||
vm_size:
|
||||
description:
|
||||
- Virtual machine size.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Standard_A2_v2
|
||||
user_name:
|
||||
description:
|
||||
- Admin user name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: dtl_admin
|
||||
storage_type:
|
||||
description:
|
||||
- Storage type to use for virtual machine.
|
||||
returned: always
|
||||
type: str
|
||||
sample: standard
|
||||
compute_vm_id:
|
||||
description:
|
||||
- Resource id of compute virtual machine.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLab-myVm-097933/providers/Microsoft.Compute/virtualMachines/myVm
|
||||
compute_vm_resource_group:
|
||||
description:
|
||||
- Resource group where compute virtual machine is created.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab-myVm-097933
|
||||
compute_vm_name:
|
||||
description:
|
||||
- Name of compute virtual machine.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myVm
|
||||
fqdn:
|
||||
description:
|
||||
- Fully qualified domain name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myvm.eastus.cloudapp.azure.com
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of the virtual machine.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
tags:
|
||||
description:
|
||||
- The tags of the resource.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: "{ 'foo': 'bar' }"
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDtlVirtualMachineInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
super(AzureRMDtlVirtualMachineInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlabvirtualmachine_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlabvirtualmachine_facts' module has been renamed to 'azure_rm_devtestlabvirtualmachine_info'",
|
||||
version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name:
|
||||
self.results['virtualmachines'] = self.get()
|
||||
else:
|
||||
self.results['virtualmachines'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.virtual_machines.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for Virtual Machine.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.virtual_machines.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for Virtual Machine.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'id': d.get('id', None),
|
||||
'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
|
||||
'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
|
||||
'name': d.get('name'),
|
||||
'notes': d.get('notes'),
|
||||
'disallow_public_ip_address': d.get('disallow_public_ip_address'),
|
||||
'expiration_date': d.get('expiration_date'),
|
||||
'image': d.get('gallery_image_reference'),
|
||||
'os_type': d.get('os_type').lower(),
|
||||
'vm_size': d.get('size'),
|
||||
'user_name': d.get('user_name'),
|
||||
'storage_type': d.get('storage_type').lower(),
|
||||
'compute_vm_id': d.get('compute_id'),
|
||||
'compute_vm_resource_group': self.parse_resource_to_dict(d.get('compute_id')).get('resource_group'),
|
||||
'compute_vm_name': self.parse_resource_to_dict(d.get('compute_id')).get('name'),
|
||||
'fqdn': d.get('fqdn'),
|
||||
'provisioning_state': d.get('provisioning_state'),
|
||||
'tags': d.get('tags', None)
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDtlVirtualMachineInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,293 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabvirtualnetwork
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure DevTest Lab Virtual Network instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure DevTest Lab Virtual Network.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
lab_name:
|
||||
description:
|
||||
- The name of the lab.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the virtual network.
|
||||
required: True
|
||||
location:
|
||||
description:
|
||||
- The location of the resource.
|
||||
description:
|
||||
description:
|
||||
- The description of the virtual network.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Virtual Network.
|
||||
- Use C(present) to create or update a Virtual Network and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create (or update) Virtual Network
|
||||
azure_rm_devtestlabvirtualnetwork:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: mylab
|
||||
name: myvn
|
||||
description: My Lab Virtual Network
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The identifier of the resource.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/testrg/providers/microsoft.devtestlab/labs/mylab/virtualnetworks/myvn"
|
||||
external_provider_resource_id:
|
||||
description:
|
||||
- The identifier of external virtual network.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testrg/providers/Microsoft.Network/virtualNetworks/myvn"
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMDevTestLabVirtualNetwork(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Virtual Network resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
description=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
self.virtual_network = {}
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMDevTestLabVirtualNetwork, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.virtual_network[key] = kwargs[key]
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version='2018-10-15')
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if self.virtual_network.get('location') is None:
|
||||
self.virtual_network['location'] = resource_group.location
|
||||
|
||||
# subnet overrides for virtual network and subnet created by default
|
||||
template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}"
|
||||
subnet_id = template.format(self.subscription_id,
|
||||
self.resource_group,
|
||||
self.name,
|
||||
self.name + "Subnet")
|
||||
self.virtual_network['subnet_overrides'] = [{
|
||||
'resource_id': subnet_id,
|
||||
'lab_subnet_name': self.name + "Subnet",
|
||||
'use_in_vm_creation_permission': 'Allow',
|
||||
'use_public_ip_address_permission': 'Allow'
|
||||
}]
|
||||
|
||||
old_response = self.get_virtualnetwork()
|
||||
|
||||
if not old_response:
|
||||
self.log("Virtual Network instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Virtual Network instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
if self.virtual_network.get('description') is not None and self.virtual_network.get('description') != old_response.get('description'):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Virtual Network instance")
|
||||
self.results['changed'] = True
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
response = self.create_update_virtualnetwork()
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Virtual Network instance deleted")
|
||||
self.results['changed'] = True
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
self.delete_virtualnetwork()
|
||||
# This currently doesn't work as there is a bug in SDK / Service
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
else:
|
||||
self.log("Virtual Network instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update({
|
||||
'id': response.get('id', None),
|
||||
'external_provider_resource_id': response.get('external_provider_resource_id', None)
|
||||
})
|
||||
return self.results
|
||||
|
||||
def create_update_virtualnetwork(self):
|
||||
'''
|
||||
Creates or updates Virtual Network with the specified configuration.
|
||||
|
||||
:return: deserialized Virtual Network instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Virtual Network instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.virtual_networks.create_or_update(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name,
|
||||
virtual_network=self.virtual_network)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Virtual Network instance.')
|
||||
self.fail("Error creating the Virtual Network instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_virtualnetwork(self):
|
||||
'''
|
||||
Deletes specified Virtual Network instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Virtual Network instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.virtual_networks.delete(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Virtual Network instance.')
|
||||
self.fail("Error deleting the Virtual Network instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_virtualnetwork(self):
|
||||
'''
|
||||
Gets the properties of the specified Virtual Network.
|
||||
|
||||
:return: deserialized Virtual Network instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Virtual Network instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.virtual_networks.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Virtual Network instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Virtual Network instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMDevTestLabVirtualNetwork()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,221 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_devtestlabvirtualnetwork_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure DevTest Lab Virtual Network facts
|
||||
description:
|
||||
- Get facts of Azure DevTest Lab Virtual Network.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
lab_name:
|
||||
description:
|
||||
- The name of DevTest Lab.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of DevTest Lab Virtual Network.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of DevTest Lab Virtual Network
|
||||
azure_rm_devtestlabvirtualnetwork_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
name: myVirtualNetwork
|
||||
|
||||
- name: List all Virtual Networks in DevTest Lab
|
||||
azure_rm_devtestlabvirtualnetwork_info:
|
||||
resource_group: myResourceGroup
|
||||
lab_name: myLab
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
virtualnetworks:
|
||||
description:
|
||||
- A list of dictionaries containing facts for DevTest Lab Virtual Network.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The identifier of the virtual network.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/virtualnetworks/myVirtualNetwork"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
lab_name:
|
||||
description:
|
||||
- Name of the lab.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLab
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual network.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myVirtualNetwork
|
||||
description:
|
||||
description:
|
||||
- Description of the virtual network.
|
||||
returned: always
|
||||
type: str
|
||||
sample: My Virtual Network
|
||||
external_provider_resource_id:
|
||||
description:
|
||||
- Resource id of an external virtual network.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirtualNetwork"
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of the virtual network.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.devtestlabs import DevTestLabsClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDevTestLabVirtualNetworkInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
lab_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.lab_name = None
|
||||
self.name = None
|
||||
super(AzureRMDevTestLabVirtualNetworkInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_devtestlabvirtualnetwork_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_devtestlabvirtualnetwork_facts' module has been renamed to 'azure_rm_devtestlabvirtualnetwork_info'",
|
||||
version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name:
|
||||
self.results['virtualnetworks'] = self.get()
|
||||
else:
|
||||
self.results['virtualnetworks'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.virtual_networks.list(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not list Virtual Networks for DevTest Lab.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.virtual_networks.get(resource_group_name=self.resource_group,
|
||||
lab_name=self.lab_name,
|
||||
name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail('Could not get facts for Virtual Network.')
|
||||
|
||||
if response:
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'lab_name': self.lab_name,
|
||||
'name': d.get('name', None),
|
||||
'id': d.get('id', None),
|
||||
'external_provider_resource_id': d.get('external_provider_resource_id', None),
|
||||
'provisioning_state': d.get('provisioning_state', None),
|
||||
'description': d.get('description', None)
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDevTestLabVirtualNetworkInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,485 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
|
||||
# Copyright (c) 2017 Ansible Project
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_dnsrecordset
|
||||
|
||||
version_added: "2.4"
|
||||
|
||||
short_description: Create, delete and update DNS record sets and records
|
||||
|
||||
description:
|
||||
- Creates, deletes, and updates DNS records sets and records within an existing Azure DNS Zone.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
required: true
|
||||
zone_name:
|
||||
description:
|
||||
- Name of the existing DNS zone in which to manage the record set.
|
||||
required: true
|
||||
relative_name:
|
||||
description:
|
||||
- Relative name of the record set.
|
||||
required: true
|
||||
record_type:
|
||||
description:
|
||||
- The type of record set to create or delete.
|
||||
choices:
|
||||
- A
|
||||
- AAAA
|
||||
- CNAME
|
||||
- MX
|
||||
- NS
|
||||
- SRV
|
||||
- TXT
|
||||
- PTR
|
||||
- CAA
|
||||
- SOA
|
||||
required: true
|
||||
record_mode:
|
||||
description:
|
||||
- Whether existing record values not sent to the module should be purged.
|
||||
default: purge
|
||||
choices:
|
||||
- append
|
||||
- purge
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the record set. Use C(present) to create or update and C(absent) to delete.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
time_to_live:
|
||||
description:
|
||||
- Time to live of the record set in seconds.
|
||||
default: 3600
|
||||
records:
|
||||
description:
|
||||
- List of records to be created depending on the type of record (set).
|
||||
suboptions:
|
||||
preference:
|
||||
description:
|
||||
- Used for creating an C(MX) record set/records.
|
||||
priority:
|
||||
description:
|
||||
- Used for creating an C(SRV) record set/records.
|
||||
weight:
|
||||
description:
|
||||
- Used for creating an C(SRV) record set/records.
|
||||
port:
|
||||
description:
|
||||
- Used for creating an C(SRV) record set/records.
|
||||
entry:
|
||||
description:
|
||||
- Primary data value for all record types.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Obezimnaka Boms (@ozboms)
|
||||
- Matt Davis (@nitzmahone)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: ensure an "A" record set with multiple records
|
||||
azure_rm_dnsrecordset:
|
||||
resource_group: myResourceGroup
|
||||
relative_name: www
|
||||
zone_name: testing.com
|
||||
record_type: A
|
||||
records:
|
||||
- entry: 192.168.100.101
|
||||
- entry: 192.168.100.102
|
||||
- entry: 192.168.100.103
|
||||
|
||||
- name: delete a record set
|
||||
azure_rm_dnsrecordset:
|
||||
resource_group: myResourceGroup
|
||||
record_type: A
|
||||
relative_name: www
|
||||
zone_name: testing.com
|
||||
state: absent
|
||||
|
||||
- name: create multiple "A" record sets with multiple records
|
||||
azure_rm_dnsrecordset:
|
||||
resource_group: myResourceGroup
|
||||
zone_name: testing.com
|
||||
relative_name: "{{ item.name }}"
|
||||
record_type: "{{ item.type }}"
|
||||
records: "{{ item.records }}"
|
||||
with_items:
|
||||
- { name: 'servera', type: 'A', records: [ { entry: '10.10.10.20' }, { entry: '10.10.10.21' }] }
|
||||
- { name: 'serverb', type: 'A', records: [ { entry: '10.10.10.30' }, { entry: '10.10.10.41' }] }
|
||||
- { name: 'serverc', type: 'A', records: [ { entry: '10.10.10.40' }, { entry: '10.10.10.41' }] }
|
||||
|
||||
- name: create SRV records in a new record set
|
||||
azure_rm_dnsrecordset:
|
||||
resource_group: myResourceGroup
|
||||
relative_name: _sip._tcp.testing.com
|
||||
zone_name: testing.com
|
||||
time_to_live: 7200
|
||||
record_type: SRV
|
||||
records:
|
||||
- entry: sip.testing.com
|
||||
preference: 10
|
||||
priority: 20
|
||||
weight: 10
|
||||
port: 5060
|
||||
|
||||
- name: create PTR record in a new record set
|
||||
azure_rm_dnsrecordset:
|
||||
resource_group: myResourceGroup
|
||||
relative_name: 192.168.100.101.in-addr.arpa
|
||||
zone_name: testing.com
|
||||
record_type: PTR
|
||||
records:
|
||||
- entry: servera.testing.com
|
||||
|
||||
- name: create TXT record in a new record set
|
||||
azure_rm_dnsrecordset:
|
||||
resource_group: myResourceGroup
|
||||
relative_name: mail.testing.com
|
||||
zone_name: testing.com
|
||||
record_type: TXT
|
||||
records:
|
||||
- entry: 'v=spf1 a -all'
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
state:
|
||||
description:
|
||||
- Current state of the DNS record set.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The DNS record set ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxx......xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/dnszones/b57dc95985712e4523282.com/A/www"
|
||||
name:
|
||||
description:
|
||||
- Relative name of the record set.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'www'
|
||||
fqdn:
|
||||
description:
|
||||
- Fully qualified domain name of the record set.
|
||||
returned: always
|
||||
type: str
|
||||
sample: www.b57dc95985712e4523282.com
|
||||
etag:
|
||||
description:
|
||||
- The etag of the record set.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 692c3e92-a618-46fc-aecd-8f888807cd6c
|
||||
provisioning_state:
|
||||
description:
|
||||
- The DNS record set state.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
target_resource:
|
||||
description:
|
||||
- The target resource of the record set.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {}
|
||||
ttl:
|
||||
description:
|
||||
- The TTL (time-to-live) of the records in the record set.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 3600
|
||||
type:
|
||||
description:
|
||||
- The type of DNS record in this record set.
|
||||
returned: always
|
||||
type: str
|
||||
sample: A
|
||||
arecords:
|
||||
description:
|
||||
- A list of records in the record set.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"ipv4_address": "192.0.2.2"
|
||||
},
|
||||
{
|
||||
"ipv4_address": "192.0.2.4"
|
||||
},
|
||||
{
|
||||
"ipv4_address": "192.0.2.8"
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
import inspect
|
||||
import sys
|
||||
|
||||
from ansible.module_utils.basic import _load_params
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
RECORD_ARGSPECS = dict(
|
||||
A=dict(
|
||||
ipv4_address=dict(type='str', required=True, aliases=['entry'])
|
||||
),
|
||||
AAAA=dict(
|
||||
ipv6_address=dict(type='str', required=True, aliases=['entry'])
|
||||
),
|
||||
CNAME=dict(
|
||||
cname=dict(type='str', required=True, aliases=['entry'])
|
||||
),
|
||||
MX=dict(
|
||||
preference=dict(type='int', required=True),
|
||||
exchange=dict(type='str', required=True, aliases=['entry'])
|
||||
),
|
||||
NS=dict(
|
||||
nsdname=dict(type='str', required=True, aliases=['entry'])
|
||||
),
|
||||
PTR=dict(
|
||||
ptrdname=dict(type='str', required=True, aliases=['entry'])
|
||||
),
|
||||
SRV=dict(
|
||||
priority=dict(type='int', required=True),
|
||||
port=dict(type='int', required=True),
|
||||
weight=dict(type='int', required=True),
|
||||
target=dict(type='str', required=True, aliases=['entry'])
|
||||
),
|
||||
TXT=dict(
|
||||
value=dict(type='list', required=True, aliases=['entry'])
|
||||
),
|
||||
SOA=dict(
|
||||
host=dict(type='str', aliases=['entry']),
|
||||
email=dict(type='str'),
|
||||
serial_number=dict(type='long'),
|
||||
refresh_time=dict(type='long'),
|
||||
retry_time=dict(type='long'),
|
||||
expire_time=dict(type='long'),
|
||||
minimum_ttl=dict(type='long')
|
||||
),
|
||||
CAA=dict(
|
||||
value=dict(type='str', aliases=['entry']),
|
||||
flags=dict(type='int'),
|
||||
tag=dict(type='str')
|
||||
)
|
||||
# FUTURE: ensure all record types are supported (see https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-dns/azure/mgmt/dns/models)
|
||||
)
|
||||
|
||||
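# maps each record type to its RecordSet attribute name, the SDK model class for a single record, and whether the attribute is list-valued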
RECORDSET_VALUE_MAP = dict(
|
||||
A=dict(attrname='arecords', classobj='ARecord', is_list=True),
|
||||
AAAA=dict(attrname='aaaa_records', classobj='AaaaRecord', is_list=True),
|
||||
CNAME=dict(attrname='cname_record', classobj='CnameRecord', is_list=False),
|
||||
MX=dict(attrname='mx_records', classobj='MxRecord', is_list=True),
|
||||
NS=dict(attrname='ns_records', classobj='NsRecord', is_list=True),
|
||||
PTR=dict(attrname='ptr_records', classobj='PtrRecord', is_list=True),
|
||||
SRV=dict(attrname='srv_records', classobj='SrvRecord', is_list=True),
|
||||
TXT=dict(attrname='txt_records', classobj='TxtRecord', is_list=True),
|
||||
SOA=dict(attrname='soa_record', classobj='SoaRecord', is_list=False),
|
||||
CAA=dict(attrname='caa_records', classobj='CaaRecord', is_list=True)
|
||||
# FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py
|
||||
) if HAS_AZURE else {}
|
||||
|
||||
|
||||
class AzureRMRecordSet(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
# we're doing two-pass arg validation, sample and store the args internally to allow this
|
||||
_load_params()
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
relative_name=dict(type='str', required=True),
|
||||
zone_name=dict(type='str', required=True),
|
||||
record_type=dict(choices=RECORD_ARGSPECS.keys(), required=True, type='str'),
|
||||
record_mode=dict(choices=['append', 'purge'], default='purge'),
|
||||
state=dict(choices=['present', 'absent'], default='present', type='str'),
|
||||
time_to_live=dict(type='int', default=3600),
|
||||
records=dict(type='list', elements='dict')
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['records'])
|
||||
]
|
||||
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
|
||||
# first-pass arg validation so we can get the record type - skip exec_module
|
||||
super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True, skip_exec=True)
|
||||
|
||||
# look up the right subspec and metadata
|
||||
record_subspec = RECORD_ARGSPECS.get(self.module.params['record_type'])
|
||||
|
||||
# patch the right record shape onto the argspec
|
||||
self.module_arg_spec['records']['options'] = record_subspec
|
||||
|
||||
self.resource_group = None
|
||||
self.relative_name = None
|
||||
self.zone_name = None
|
||||
self.record_type = None
|
||||
self.record_mode = None
|
||||
self.state = None
|
||||
self.time_to_live = None
|
||||
self.records = None
|
||||
|
||||
# rerun validation and actually run the module this time
|
||||
super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
for key in self.module_arg_spec.keys():
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
zone = self.dns_client.zones.get(self.resource_group, self.zone_name)
|
||||
if not zone:
|
||||
self.fail('The zone {0} does not exist in the resource group {1}'.format(self.zone_name, self.resource_group))
|
||||
|
||||
try:
|
||||
self.log('Fetching Record Set {0}'.format(self.relative_name))
|
||||
record_set = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type)
|
||||
self.results['state'] = self.recordset_to_dict(record_set)
|
||||
except CloudError:
|
||||
record_set = None
|
||||
# FUTURE: fail on anything other than ResourceNotFound
|
||||
|
||||
record_type_metadata = RECORDSET_VALUE_MAP.get(self.record_type)
|
||||
|
||||
# FUTURE: implement diff mode
|
||||
|
||||
if self.state == 'present':
|
||||
# convert the input records to SDK objects
|
||||
self.input_sdk_records = self.create_sdk_records(self.records, self.record_type)
|
||||
|
||||
if not record_set:
|
||||
changed = True
|
||||
else:
|
||||
# and use it to get the type-specific records
|
||||
server_records = getattr(record_set, record_type_metadata.get('attrname'))
|
||||
|
||||
# compare the input records to the server records
|
||||
self.input_sdk_records, changed = self.records_changed(self.input_sdk_records, server_records)
|
||||
|
||||
# also check top-level recordset properties
|
||||
changed |= record_set.ttl != self.time_to_live
|
||||
|
||||
# FUTURE: add metadata/tag check on recordset
|
||||
|
||||
self.results['changed'] |= changed
|
||||
|
||||
elif self.state == 'absent':
|
||||
if record_set:
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
if self.results['changed']:
|
||||
if self.state == 'present':
|
||||
record_set_args = dict(
|
||||
ttl=self.time_to_live
|
||||
)
|
||||
|
||||
record_set_args[record_type_metadata['attrname']] = self.input_sdk_records if record_type_metadata['is_list'] else self.input_sdk_records[0]
|
||||
|
||||
record_set = self.dns_models.RecordSet(**record_set_args)
|
||||
|
||||
self.results['state'] = self.create_or_update(record_set)
|
||||
|
||||
elif self.state == 'absent':
|
||||
# delete record set
|
||||
self.delete_record_set()
|
||||
|
||||
return self.results
|
||||
|
||||
def create_or_update(self, record_set):
|
||||
try:
|
||||
record_set = self.dns_client.record_sets.create_or_update(resource_group_name=self.resource_group,
|
||||
zone_name=self.zone_name,
|
||||
relative_record_set_name=self.relative_name,
|
||||
record_type=self.record_type,
|
||||
parameters=record_set)
|
||||
return self.recordset_to_dict(record_set)
|
||||
except Exception as exc:
|
||||
self.fail("Error creating or updating dns record {0} - {1}".format(self.relative_name, exc.message or str(exc)))
|
||||
|
||||
def delete_record_set(self):
|
||||
try:
|
||||
# delete the record set
|
||||
self.dns_client.record_sets.delete(resource_group_name=self.resource_group,
|
||||
zone_name=self.zone_name,
|
||||
relative_record_set_name=self.relative_name,
|
||||
record_type=self.record_type)
|
||||
except Exception as exc:
|
||||
self.fail("Error deleting record set {0} - {1}".format(self.relative_name, exc.message or str(exc)))
|
||||
return None
|
||||
|
||||
def create_sdk_records(self, input_records, record_type):
|
||||
record = RECORDSET_VALUE_MAP.get(record_type)
|
||||
if not record:
|
||||
self.fail('record type {0} is not supported'.format(record_type))
|
||||
record_sdk_class = getattr(self.dns_models, record.get('classobj'))
|
||||
return [record_sdk_class(**x) for x in input_records]
|
||||
|
||||
def records_changed(self, input_records, server_records):
|
||||
# ensure we're always comparing a list, even for the single-valued types
|
||||
if not isinstance(server_records, list):
|
||||
server_records = [server_records]
|
||||
|
||||
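# serialize each record to canonical JSON so the SDK record objects can be compared as sets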
input_set = set([self.module.jsonify(x.as_dict()) for x in input_records])
|
||||
server_set = set([self.module.jsonify(x.as_dict()) for x in server_records])
|
||||
|
||||
if self.record_mode == 'append': # only a difference if the server set is missing something from the input set
|
||||
input_set = server_set.union(input_set)
|
||||
|
||||
# after the append-mode union above, any remaining difference between the sets is a change
|
||||
changed = input_set != server_set
|
||||
|
||||
records = [self.module.from_json(x) for x in input_set]
|
||||
return self.create_sdk_records(records, self.record_type), changed
|
||||
|
||||
def recordset_to_dict(self, recordset):
|
||||
result = recordset.as_dict()
|
||||
# str.strip() removes a set of characters rather than a prefix, so slice the prefix off instead
result['type'] = result['type'][len('Microsoft.Network/dnszones/'):]
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMRecordSet()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,294 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_dnsrecordset_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get DNS Record Set facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific DNS record set in a zone, for all record sets of a given type in a zone, or for all record sets in a zone.
|
||||
|
||||
options:
|
||||
relative_name:
|
||||
description:
|
||||
- Only show results for a specific record set.
|
||||
resource_group:
|
||||
description:
|
||||
- Limit results by resource group. Required when filtering by name or type.
|
||||
zone_name:
|
||||
description:
|
||||
- Limit results by zone. Required when filtering by name or type.
|
||||
record_type:
|
||||
description:
|
||||
- Limit record sets by record type.
|
||||
top:
|
||||
description:
|
||||
- Limit the maximum number of record sets to return.
|
||||
type: int
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Ozi Boms (@ozboms)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for one Record Set
|
||||
azure_rm_dnsrecordset_info:
|
||||
resource_group: myResourceGroup
|
||||
zone_name: example.com
|
||||
relative_name: server10
|
||||
record_type: A
|
||||
- name: Get facts for all Type A Record Sets in a Zone
|
||||
azure_rm_dnsrecordset_info:
|
||||
resource_group: myResourceGroup
|
||||
zone_name: example.com
|
||||
record_type: A
|
||||
- name: Get all record sets in one zone
|
||||
azure_rm_dnsrecordset_info:
|
||||
resource_group: myResourceGroup
|
||||
zone_name: example.com
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_dnsrecordset:
|
||||
description:
|
||||
- List of record set dicts.
|
||||
returned: always
|
||||
type: list
|
||||
example: [
|
||||
{
|
||||
"etag": "60ac0480-44dd-4881-a2ed-680d20b3978e",
|
||||
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/newzone.com/A/servera",
|
||||
"name": "servera",
|
||||
"properties": {
|
||||
"ARecords": [
|
||||
{
|
||||
"ipv4Address": "10.4.5.7"
|
||||
},
|
||||
{
|
||||
"ipv4Address": "2.4.5.8"
|
||||
}
|
||||
],
|
||||
"TTL": 12900
|
||||
},
|
||||
"type": "Microsoft.Network/dnszones/A"
|
||||
}]
|
||||
dnsrecordsets:
|
||||
description:
|
||||
- List of record set dicts, which share the same hierarchy as the M(azure_rm_dnsrecordset) module's parameters.
|
||||
returned: always
|
||||
type: list
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- ID of the dns recordset.
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/newzone.
|
||||
com/A/servera"
|
||||
relative_name:
|
||||
description:
|
||||
- Name of the dns recordset.
|
||||
sample: servera
|
||||
record_type:
|
||||
description:
|
||||
- The type of the record set.
|
||||
- Can be C(A), C(AAAA), C(CNAME), C(MX), C(NS), C(SRV), C(TXT), C(PTR), C(SOA) or C(CAA).
|
||||
sample: A
|
||||
time_to_live:
|
||||
description:
|
||||
- Time to live of the record set in seconds.
|
||||
sample: 12900
|
||||
records:
|
||||
description:
|
||||
- List of records depending on the type of recordset.
|
||||
sample: [
|
||||
{
|
||||
"ipv4Address": "10.4.5.7"
|
||||
},
|
||||
{
|
||||
"ipv4Address": "2.4.5.8"
|
||||
}
|
||||
]
|
||||
provisioning_state:
|
||||
description:
|
||||
- Provisioning state of the resource.
|
||||
sample: Succeeded
|
||||
fqdn:
|
||||
description:
|
||||
- Fully qualified domain name of the record set.
|
||||
sample: www.newzone.com
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
AZURE_OBJECT_CLASS = 'RecordSet'
|
||||
|
||||
|
||||
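# maps each record type to the RecordSet attribute that holds its records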
RECORDSET_VALUE_MAP = dict(
|
||||
A='arecords',
|
||||
AAAA='aaaa_records',
|
||||
CNAME='cname_record',
|
||||
MX='mx_records',
|
||||
NS='ns_records',
|
||||
PTR='ptr_records',
|
||||
SRV='srv_records',
|
||||
TXT='txt_records',
|
||||
SOA='soa_record',
|
||||
CAA='caa_records'
|
||||
# FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py
|
||||
)
|
||||
|
||||
|
||||
class AzureRMRecordSetInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
relative_name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
zone_name=dict(type='str'),
|
||||
record_type=dict(type='str'),
|
||||
top=dict(type='int')
|
||||
)
|
||||
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
)
|
||||
|
||||
self.relative_name = None
|
||||
self.resource_group = None
|
||||
self.zone_name = None
|
||||
self.record_type = None
|
||||
self.top = None
|
||||
|
||||
super(AzureRMRecordSetInfo, self).__init__(self.module_arg_spec)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_dnsrecordset_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_dnsrecordset_facts' module has been renamed to 'azure_rm_dnsrecordset_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if not self.top or self.top <= 0:
|
||||
self.top = None
|
||||
|
||||
# create conditionals to catch errors when calling record facts
|
||||
if self.relative_name and not self.resource_group:
|
||||
self.fail("Parameter error: resource group required when filtering by name or record type.")
|
||||
if self.relative_name and not self.zone_name:
|
||||
self.fail("Parameter error: DNS Zone required when filtering by name or record type.")
|
||||
|
||||
results = []
|
||||
# list the conditions for what to return based on input
|
||||
if self.relative_name is not None:
|
||||
# if there is a name listed, they want only facts about that specific Record Set itself
|
||||
results = self.get_item()
|
||||
elif self.record_type:
|
||||
# else, they just want all the record sets of a specific type
|
||||
results = self.list_type()
|
||||
elif self.zone_name:
|
||||
# if there is a zone name listed, then they want all the record sets in a zone
|
||||
results = self.list_zone()
|
||||
|
||||
if is_old_facts:
|
||||
self.results['ansible_facts'] = {
|
||||
'azure_dnsrecordset': self.serialize_list(results)
|
||||
}
|
||||
self.results['dnsrecordsets'] = self.curated_list(results)
|
||||
return self.results
|
||||
|
||||
def get_item(self):
|
||||
self.log('Get properties for {0}'.format(self.relative_name))
|
||||
item = None
|
||||
results = []
|
||||
|
||||
# try to get information for specific Record Set
|
||||
try:
|
||||
item = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type)
|
||||
except CloudError:
|
||||
pass
|
||||
|
||||
# only wrap the record set in a list if it was actually found
results = [item] if item else []
|
||||
return results
|
||||
|
||||
def list_type(self):
|
||||
self.log('Lists the record sets of a specified type in a DNS zone')
|
||||
try:
|
||||
response = self.dns_client.record_sets.list_by_type(self.resource_group, self.zone_name, self.record_type, top=self.top)
|
||||
except AzureHttpError as exc:
|
||||
self.fail("Failed to list for record type {0} - {1}".format(self.record_type, str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
results.append(item)
|
||||
return results
|
||||
|
||||
def list_zone(self):
|
||||
self.log('Lists all record sets in a DNS zone')
|
||||
try:
|
||||
response = self.dns_client.record_sets.list_by_dns_zone(self.resource_group, self.zone_name, top=self.top)
|
||||
except AzureHttpError as exc:
|
||||
self.fail("Failed to list for zone {0} - {1}".format(self.zone_name, str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
results.append(item)
|
||||
return results
|
||||
|
||||
def serialize_list(self, raws):
|
||||
return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else []
|
||||
|
||||
def curated_list(self, raws):
|
||||
return [self.record_to_dict(item) for item in raws] if raws else []
|
||||
|
||||
def record_to_dict(self, record):
|
||||
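# record.type looks like 'Microsoft.Network/dnszones/A'; drop the prefix to get the bare record type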
record_type = record.type[len('Microsoft.Network/dnszones/'):]
|
||||
records = getattr(record, RECORDSET_VALUE_MAP.get(record_type))
|
||||
if not isinstance(records, list):
|
||||
records = [records]
|
||||
return dict(
|
||||
id=record.id,
|
||||
relative_name=record.name,
|
||||
record_type=record_type,
|
||||
records=[x.as_dict() for x in records],
|
||||
time_to_live=record.ttl,
|
||||
fqdn=record.fqdn,
|
||||
provisioning_state=record.provisioning_state
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMRecordSetInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,302 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_dnszone
|
||||
|
||||
version_added: "2.4"
|
||||
|
||||
short_description: Manage Azure DNS zones
|
||||
|
||||
description:
|
||||
- Creates and deletes Azure DNS zones.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the DNS zone.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the zone. Use C(present) to create or update and C(absent) to delete.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
type:
|
||||
description:
|
||||
- The type of this DNS zone (C(public) or C(private)).
|
||||
choices:
|
||||
- public
|
||||
- private
|
||||
version_added: 2.8
|
||||
registration_virtual_networks:
|
||||
description:
|
||||
- A list of references to virtual networks that register hostnames in this DNS zone.
|
||||
- This is only used when I(type=private).
|
||||
- Each element can be the name or resource ID of the virtual network, or a dict containing its C(name) and C(resource_group).
|
||||
version_added: 2.8
|
||||
type: list
|
||||
resolution_virtual_networks:
|
||||
description:
|
||||
- A list of references to virtual networks that resolve records in this DNS zone.
|
||||
- This is only used when I(type=private).
|
||||
- Each element can be the name or resource ID of the virtual network, or a dict containing its C(name) and C(resource_group).
|
||||
version_added: '2.8'
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Obezimnaka Boms (@ozboms)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Create a DNS zone
|
||||
azure_rm_dnszone:
|
||||
resource_group: myResourceGroup
|
||||
name: example.com
|
||||
|
||||
- name: Delete a DNS zone
|
||||
azure_rm_dnszone:
|
||||
resource_group: myResourceGroup
|
||||
name: example.com
|
||||
state: absent
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
state:
|
||||
description:
|
||||
- Current state of the zone.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup",
|
||||
"location": "global",
|
||||
"name": "Testing",
|
||||
"name_servers": [
|
||||
"ns1-07.azure-dns.com.",
|
||||
"ns2-07.azure-dns.net.",
|
||||
"ns3-07.azure-dns.org.",
|
||||
"ns4-07.azure-dns.info."
|
||||
],
|
||||
"number_of_record_sets": 2,
|
||||
"type": "private",
|
||||
"resolution_virtual_networks": ["/subscriptions/XXXX/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/foo"]
|
||||
}
|
||||
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMDNSZone(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
# define user inputs from playbook
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(choices=['present', 'absent'], default='present', type='str'),
|
||||
type=dict(type='str', choices=['private', 'public']),
|
||||
registration_virtual_networks=dict(type='list', elements='raw'),
|
||||
resolution_virtual_networks=dict(type='list', elements='raw')
|
||||
)
|
||||
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
state=dict()
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.tags = None
|
||||
self.type = None
|
||||
self.registration_virtual_networks = None
|
||||
self.resolution_virtual_networks = None
|
||||
|
||||
super(AzureRMDNSZone, self).__init__(self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
# create a new zone variable in case the 'try' doesn't find a zone
|
||||
zone = None
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.registration_virtual_networks = self.preprocess_vn_list(self.registration_virtual_networks)
|
||||
self.resolution_virtual_networks = self.preprocess_vn_list(self.resolution_virtual_networks)
|
||||
|
||||
self.results['check_mode'] = self.check_mode
|
||||
|
||||
# retrieve resource group to make sure it exists
|
||||
self.get_resource_group(self.resource_group)
|
||||
|
||||
changed = False
|
||||
results = dict()
|
||||
|
||||
try:
|
||||
self.log('Fetching DNS zone {0}'.format(self.name))
|
||||
zone = self.dns_client.zones.get(self.resource_group, self.name)
|
||||
|
||||
# serialize object into a dictionary
|
||||
results = zone_to_dict(zone)
|
||||
|
||||
# creating a zone that already exists is a no-op, but deleting it is a change
|
||||
if self.state == 'present':
|
||||
changed = False
|
||||
|
||||
update_tags, results['tags'] = self.update_tags(results['tags'])
|
||||
if update_tags:
|
||||
changed = True
|
||||
if self.type and results['type'] != self.type:
|
||||
changed = True
|
||||
results['type'] = self.type
|
||||
if self.resolution_virtual_networks:
|
||||
if set(self.resolution_virtual_networks) != set(results['resolution_virtual_networks'] or []):
|
||||
changed = True
|
||||
results['resolution_virtual_networks'] = self.resolution_virtual_networks
|
||||
else:
|
||||
# this property should not be changed
|
||||
self.resolution_virtual_networks = results['resolution_virtual_networks']
|
||||
if self.registration_virtual_networks:
|
||||
if set(self.registration_virtual_networks) != set(results['registration_virtual_networks'] or []):
|
||||
changed = True
|
||||
results['registration_virtual_networks'] = self.registration_virtual_networks
|
||||
else:
|
||||
self.registration_virtual_networks = results['registration_virtual_networks']
|
||||
elif self.state == 'absent':
|
||||
changed = True
|
||||
|
||||
except CloudError:
|
||||
# the zone does not exist so create it
|
||||
if self.state == 'present':
|
||||
changed = True
|
||||
else:
|
||||
# you can't delete what is not there
|
||||
changed = False
|
||||
|
||||
self.results['changed'] = changed
|
||||
self.results['state'] = results
|
||||
|
||||
# return the results if we're only gathering information (check mode)
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
if changed:
|
||||
if self.state == 'present':
|
||||
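# DNS zones are a global resource; the SDK expects the zone type capitalized (Public/Private)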
zone = self.dns_models.Zone(zone_type=str.capitalize(self.type) if self.type else None,
|
||||
tags=self.tags,
|
||||
location='global')
|
||||
if self.resolution_virtual_networks:
|
||||
zone.resolution_virtual_networks = self.construct_subresource_list(self.resolution_virtual_networks)
|
||||
if self.registration_virtual_networks:
|
||||
zone.registration_virtual_networks = self.construct_subresource_list(self.registration_virtual_networks)
|
||||
self.results['state'] = self.create_or_update_zone(zone)
|
||||
elif self.state == 'absent':
|
||||
# delete zone
|
||||
self.delete_zone()
|
||||
# the delete does not actually return anything. if no exception, then we'll assume
|
||||
# it worked.
|
||||
self.results['state']['status'] = 'Deleted'
|
||||
|
||||
return self.results
|
||||
|
||||
def create_or_update_zone(self, zone):
|
||||
try:
|
||||
# create or update the new Zone object we created
|
||||
new_zone = self.dns_client.zones.create_or_update(self.resource_group, self.name, zone)
|
||||
except Exception as exc:
|
||||
self.fail("Error creating or updating zone {0} - {1}".format(self.name, exc.message or str(exc)))
|
||||
return zone_to_dict(new_zone)
|
||||
|
||||
def delete_zone(self):
|
||||
try:
|
||||
# delete the Zone
|
||||
poller = self.dns_client.zones.delete(self.resource_group, self.name)
|
||||
result = self.get_poller_result(poller)
|
||||
except Exception as exc:
|
||||
self.fail("Error deleting zone {0} - {1}".format(self.name, exc.message or str(exc)))
|
||||
return result
|
||||
|
||||
def preprocess_vn_list(self, vn_list):
|
||||
return [self.parse_vn_id(x) for x in vn_list] if vn_list else None
|
||||
|
||||
def parse_vn_id(self, vn):
|
||||
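# accept a bare name, a full resource id, or a dict and normalize it to a full virtual network resource id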
vn_dict = self.parse_resource_to_dict(vn) if not isinstance(vn, dict) else vn
|
||||
return format_resource_id(val=vn_dict['name'],
|
||||
subscription_id=vn_dict.get('subscription') or self.subscription_id,
|
||||
namespace='Microsoft.Network',
|
||||
types='virtualNetworks',
|
||||
resource_group=vn_dict.get('resource_group') or self.resource_group)
|
||||
|
||||
def construct_subresource_list(self, raw):
|
||||
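# wrap each virtual network id in a SubResource reference as expected by the Zone model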
return [self.dns_models.SubResource(id=x) for x in raw] if raw else None
|
||||
|
||||
|
||||
def zone_to_dict(zone):
|
||||
# turn Zone object into a dictionary (serialization)
|
||||
result = dict(
|
||||
id=zone.id,
|
||||
name=zone.name,
|
||||
number_of_record_sets=zone.number_of_record_sets,
|
||||
name_servers=zone.name_servers,
|
||||
tags=zone.tags,
|
||||
type=zone.zone_type.value.lower(),
|
||||
registration_virtual_networks=[to_native(x.id) for x in zone.registration_virtual_networks] if zone.registration_virtual_networks else None,
|
||||
resolution_virtual_networks=[to_native(x.id) for x in zone.resolution_virtual_networks] if zone.resolution_virtual_networks else None
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDNSZone()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,258 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
|
||||
#
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_dnszone_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get DNS zone facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific DNS zone or all DNS zones within a resource group.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Limit results by resource group. Required when filtering by name.
|
||||
name:
|
||||
description:
|
||||
- Only show results for a specific zone.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Obezimnaka Boms (@ozboms)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for one zone
|
||||
azure_rm_dnszone_info:
|
||||
resource_group: myResourceGroup
|
||||
name: foobar22
|
||||
|
||||
- name: Get facts for all zones in a resource group
|
||||
azure_rm_dnszone_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts by tags
|
||||
azure_rm_dnszone_info:
|
||||
tags:
|
||||
- testing
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_dnszones:
|
||||
description:
|
||||
- List of zone dicts.
|
||||
returned: always
|
||||
type: list
|
||||
example: [{
|
||||
"etag": "00000002-0000-0000-0dcb-df5776efd201",
|
||||
"location": "global",
|
||||
"properties": {
|
||||
"maxNumberOfRecordSets": 5000,
|
||||
"numberOfRecordSets": 15
|
||||
},
|
||||
"tags": {}
|
||||
}]
|
||||
dnszones:
|
||||
description:
|
||||
- List of zone dicts, which share the same layout as the M(azure_rm_dnszone) module's parameters.
|
||||
returned: always
|
||||
type: list
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- ID of the DNS zone.
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/azure.com"
|
||||
name:
|
||||
description:
|
||||
- Name of the DNS zone.
|
||||
sample: azure.com
|
||||
type:
|
||||
description:
|
||||
- The type of this DNS zone (C(public) or C(private)).
|
||||
sample: private
|
||||
registration_virtual_networks:
|
||||
description:
|
||||
- A list of references to virtual networks that register hostnames in this DNS zone.
|
||||
type: list
|
||||
sample: ["/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/bar"]
|
||||
resolution_virtual_networks:
|
||||
description:
|
||||
- A list of references to virtual networks that resolve records in this DNS zone.
|
||||
type: list
|
||||
sample: ["/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/deadbeef"]
|
||||
number_of_record_sets:
|
||||
description:
|
||||
- The current number of record sets in this DNS zone.
|
||||
type: int
|
||||
sample: 2
|
||||
max_number_of_record_sets:
|
||||
description:
|
||||
- The maximum number of record sets that can be created in this DNS zone.
|
||||
type: int
|
||||
sample: 5000
|
||||
name_servers:
|
||||
description:
|
||||
- The name servers for this DNS zone.
|
||||
type: list
|
||||
sample: [
|
||||
"ns1-03.azure-dns.com.",
|
||||
"ns2-03.azure-dns.net.",
|
||||
"ns3-03.azure-dns.org.",
|
||||
"ns4-03.azure-dns.info."
|
||||
]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
AZURE_OBJECT_CLASS = 'DnsZone'
|
||||
|
||||
|
||||
class AzureRMDNSZoneInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
ansible_info=dict(azure_dnszones=[])
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMDNSZoneInfo, self).__init__(self.module_arg_spec)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_dnszone_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_dnszone_facts' module has been renamed to 'azure_rm_dnszone_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name and not self.resource_group:
|
||||
self.fail("Parameter error: resource group required when filtering by name.")
|
||||
|
||||
results = []
|
||||
# list the conditions and what to return based on user input
|
||||
if self.name is not None:
|
||||
# if there is a name, facts about that specific zone
|
||||
results = self.get_item()
|
||||
elif self.resource_group:
|
||||
# all the zones listed in that specific resource group
|
||||
results = self.list_resource_group()
|
||||
else:
|
||||
# all the zones in a subscription
|
||||
results = self.list_items()
|
||||
|
||||
self.results['ansible_info']['azure_dnszones'] = self.serialize_items(results)
|
||||
self.results['dnszones'] = self.curated_items(results)
|
||||
|
||||
return self.results
|
||||
|
||||
def get_item(self):
|
||||
self.log('Get properties for {0}'.format(self.name))
|
||||
item = None
|
||||
results = []
|
||||
# get specific zone
|
||||
try:
|
||||
item = self.dns_client.zones.get(self.resource_group, self.name)
|
||||
except CloudError:
|
||||
pass
|
||||
|
||||
# serialize result
|
||||
if item and self.has_tags(item.tags, self.tags):
|
||||
results = [item]
|
||||
return results
|
||||
|
||||
def list_resource_group(self):
|
||||
self.log('List items for resource group')
|
||||
try:
|
||||
response = self.dns_client.zones.list_by_resource_group(self.resource_group)
|
||||
except AzureHttpError as exc:
|
||||
self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(item)
|
||||
return results
|
||||
|
||||
def list_items(self):
|
||||
self.log('List all items')
|
||||
try:
|
||||
response = self.dns_client.zones.list()
|
||||
except AzureHttpError as exc:
|
||||
self.fail("Failed to list all items - {0}".format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(item)
|
||||
return results
|
||||
|
||||
def serialize_items(self, raws):
|
||||
return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else []
|
||||
|
||||
def curated_items(self, raws):
|
||||
return [self.zone_to_dict(item) for item in raws] if raws else []
|
||||
|
||||
def zone_to_dict(self, zone):
|
||||
return dict(
|
||||
id=zone.id,
|
||||
name=zone.name,
|
||||
number_of_record_sets=zone.number_of_record_sets,
|
||||
max_number_of_record_sets=zone.max_number_of_record_sets,
|
||||
name_servers=zone.name_servers,
|
||||
tags=zone.tags,
|
||||
type=zone.zone_type.value.lower(),
|
||||
registration_virtual_networks=[to_native(x.id) for x in zone.registration_virtual_networks] if zone.registration_virtual_networks else None,
|
||||
resolution_virtual_networks=[to_native(x.id) for x in zone.resolution_virtual_networks] if zone.resolution_virtual_networks else None
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMDNSZoneInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,421 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_functionapp
|
||||
version_added: "2.4"
|
||||
short_description: Manage Azure Function Apps
|
||||
description:
|
||||
- Create, update or delete an Azure Function App.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
required: true
|
||||
aliases:
|
||||
- resource_group_name
|
||||
name:
|
||||
description:
|
||||
- Name of the Azure Function App.
|
||||
required: true
|
||||
location:
|
||||
description:
|
||||
- Valid Azure location. Defaults to location of the resource group.
|
||||
plan:
|
||||
description:
|
||||
- App service plan.
|
||||
- It can be the name of an existing app service plan in the same resource group as the function app.
|
||||
- It can be the resource ID of an existing app service plan.
|
||||
- Resource id. For example /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
|
||||
- It can be a dict containing C(name) and C(resource_group).
|
||||
- C(name). Name of app service plan.
|
||||
- C(resource_group). Resource group name of app service plan.
|
||||
version_added: "2.8"
|
||||
container_settings:
|
||||
description: Function App container settings.
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Name of container. For example "imagename:tag".
|
||||
registry_server_url:
|
||||
description:
|
||||
- Container registry server url. For example C(mydockerregistry.io).
|
||||
registry_server_user:
|
||||
description:
|
||||
- The container registry server user name.
|
||||
registry_server_password:
|
||||
description:
|
||||
- The container registry server password.
|
||||
version_added: "2.8"
|
||||
storage_account:
|
||||
description:
|
||||
- Name of the storage account to use.
|
||||
required: true
|
||||
aliases:
|
||||
- storage
|
||||
- storage_account_name
|
||||
app_settings:
|
||||
description:
|
||||
- Dictionary containing application settings.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Function App. Use C(present) to create or update a Function App and C(absent) to delete.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Thomas Stringer (@trstringer)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a function app
|
||||
azure_rm_functionapp:
|
||||
resource_group: myResourceGroup
|
||||
name: myFunctionApp
|
||||
storage_account: myStorageAccount
|
||||
|
||||
- name: Create a function app with app settings
|
||||
azure_rm_functionapp:
|
||||
resource_group: myResourceGroup
|
||||
name: myFunctionApp
|
||||
storage_account: myStorageAccount
|
||||
app_settings:
|
||||
setting1: value1
|
||||
setting2: value2
|
||||
|
||||
- name: Create container based function app
|
||||
azure_rm_functionapp:
|
||||
resource_group: myResourceGroup
|
||||
name: myFunctionApp
|
||||
storage_account: myStorageAccount
|
||||
plan:
|
||||
resource_group: myResourceGroup
|
||||
name: myAppPlan
|
||||
container_settings:
|
||||
name: httpd
|
||||
registry_server_url: index.docker.io
|
||||
|
||||
- name: Delete a function app
|
||||
azure_rm_functionapp:
|
||||
resource_group: myResourceGroup
|
||||
name: myFunctionApp
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
state:
|
||||
description:
|
||||
- Current state of the Azure Function App.
|
||||
returned: success
|
||||
type: dict
|
||||
example:
|
||||
id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myFunctionApp
|
||||
name: myfunctionapp
|
||||
kind: functionapp
|
||||
location: East US
|
||||
type: Microsoft.Web/sites
|
||||
state: Running
|
||||
host_names:
|
||||
- myfunctionapp.azurewebsites.net
|
||||
repository_site_name: myfunctionapp
|
||||
usage_state: Normal
|
||||
enabled: true
|
||||
enabled_host_names:
|
||||
- myfunctionapp.azurewebsites.net
|
||||
- myfunctionapp.scm.azurewebsites.net
|
||||
availability_state: Normal
|
||||
host_name_ssl_states:
|
||||
- name: myfunctionapp.azurewebsites.net
|
||||
ssl_state: Disabled
|
||||
host_type: Standard
|
||||
- name: myfunctionapp.scm.azurewebsites.net
|
||||
ssl_state: Disabled
|
||||
host_type: Repository
|
||||
server_farm_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/EastUSPlan
|
||||
reserved: false
|
||||
last_modified_time_utc: 2017-08-22T18:54:01.190Z
|
||||
scm_site_also_stopped: false
|
||||
client_affinity_enabled: true
|
||||
client_cert_enabled: false
|
||||
host_names_disabled: false
|
||||
outbound_ip_addresses: ............
|
||||
container_size: 1536
|
||||
daily_memory_time_quota: 0
|
||||
resource_group: myResourceGroup
|
||||
default_host_name: myfunctionapp.azurewebsites.net
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.web.models import (
|
||||
site_config, app_service_plan, Site, SiteConfig, NameValuePair, SiteSourceControl,
|
||||
AppServicePlan, SkuDescription
|
||||
)
|
||||
from azure.mgmt.resource.resources import ResourceManagementClient
|
||||
from msrest.polling import LROPoller
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
container_settings_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
registry_server_url=dict(type='str'),
|
||||
registry_server_user=dict(type='str'),
|
||||
registry_server_password=dict(type='str', no_log=True)
|
||||
)
|
||||
|
||||
|
||||
class AzureRMFunctionApp(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
location=dict(type='str'),
|
||||
storage_account=dict(
|
||||
type='str',
|
||||
aliases=['storage', 'storage_account_name']
|
||||
),
|
||||
app_settings=dict(type='dict'),
|
||||
plan=dict(
|
||||
type='raw'
|
||||
),
|
||||
container_settings=dict(
|
||||
type='dict',
|
||||
options=container_settings_spec
|
||||
)
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
state=dict()
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.location = None
|
||||
self.storage_account = None
|
||||
self.app_settings = None
|
||||
self.plan = None
|
||||
self.container_settings = None
|
||||
|
||||
required_if = [('state', 'present', ['storage_account'])]
|
||||
|
||||
super(AzureRMFunctionApp, self).__init__(
|
||||
self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
if self.app_settings is None:
|
||||
self.app_settings = dict()
|
||||
|
||||
try:
|
||||
resource_group = self.rm_client.resource_groups.get(self.resource_group)
|
||||
except CloudError:
|
||||
self.fail('Unable to retrieve resource group')
|
||||
|
||||
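# default to the resource group's location when no location is given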
self.location = self.location or resource_group.location
|
||||
|
||||
try:
|
||||
function_app = self.web_client.web_apps.get(
|
||||
resource_group_name=self.resource_group,
|
||||
name=self.name
|
||||
)
|
||||
# Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
|
||||
exists = function_app is not None
|
||||
except CloudError as exc:
|
||||
# older SDK versions raise CloudError when the app does not exist; keep function_app defined for the checks below
function_app = None
exists = False
|
||||
|
||||
if self.state == 'absent':
|
||||
if exists:
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
try:
|
||||
self.web_client.web_apps.delete(
|
||||
resource_group_name=self.resource_group,
|
||||
name=self.name
|
||||
)
|
||||
self.results['changed'] = True
|
||||
except CloudError as exc:
|
||||
self.fail('Failure while deleting web app: {0}'.format(exc))
|
||||
else:
|
||||
self.results['changed'] = False
|
||||
else:
|
||||
kind = 'functionapp'
|
||||
linux_fx_version = None
|
||||
if self.container_settings and self.container_settings.get('name'):
|
||||
kind = 'functionapp,linux,container'
|
||||
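# container-based apps encode the image as linuxFxVersion in the form DOCKER|<registry>/<image>:<tag>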
linux_fx_version = 'DOCKER|'
|
||||
if self.container_settings.get('registry_server_url'):
|
||||
self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
|
||||
linux_fx_version += self.container_settings['registry_server_url'] + '/'
|
||||
linux_fx_version += self.container_settings['name']
|
||||
if self.container_settings.get('registry_server_user'):
|
||||
self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings.get('registry_server_user')
|
||||
|
||||
if self.container_settings.get('registry_server_password'):
|
||||
self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings.get('registry_server_password')
|
||||
|
||||
if not self.plan and function_app:
|
||||
self.plan = function_app.server_farm_id
|
||||
|
||||
if not exists:
|
||||
function_app = Site(
|
||||
location=self.location,
|
||||
kind=kind,
|
||||
site_config=SiteConfig(
|
||||
app_settings=self.aggregated_app_settings(),
|
||||
scm_type='LocalGit'
|
||||
)
|
||||
)
|
||||
self.results['changed'] = True
|
||||
else:
|
||||
self.results['changed'], function_app = self.update(function_app)
|
||||
|
||||
# get app service plan
|
||||
if self.plan:
|
||||
if isinstance(self.plan, dict):
|
||||
self.plan = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Web/serverfarms/{2}".format(
|
||||
self.subscription_id,
|
||||
self.plan.get('resource_group', self.resource_group),
|
||||
self.plan.get('name')
|
||||
)
|
||||
function_app.server_farm_id = self.plan
|
||||
|
||||
# set linux fx version
|
||||
if linux_fx_version:
|
||||
function_app.site_config.linux_fx_version = linux_fx_version
|
||||
|
||||
if self.check_mode:
|
||||
self.results['state'] = function_app.as_dict()
|
||||
elif self.results['changed']:
|
||||
try:
|
||||
new_function_app = self.web_client.web_apps.create_or_update(
|
||||
resource_group_name=self.resource_group,
|
||||
name=self.name,
|
||||
site_envelope=function_app
|
||||
).result()
|
||||
self.results['state'] = new_function_app.as_dict()
|
||||
except CloudError as exc:
|
||||
self.fail('Error creating or updating web app: {0}'.format(exc))
|
||||
|
||||
return self.results
|
||||
|
||||
def update(self, source_function_app):
|
||||
"""Update the Site object if there are any changes"""
|
||||
|
||||
source_app_settings = self.web_client.web_apps.list_application_settings(
|
||||
resource_group_name=self.resource_group,
|
||||
name=self.name
|
||||
)
|
||||
|
||||
changed, target_app_settings = self.update_app_settings(source_app_settings.properties)
|
||||
|
||||
source_function_app.site_config = SiteConfig(
|
||||
app_settings=target_app_settings,
|
||||
scm_type='LocalGit'
|
||||
)
|
||||
|
||||
return changed, source_function_app
|
||||
|
||||
def update_app_settings(self, source_app_settings):
|
||||
"""Update app settings"""
|
||||
|
||||
target_app_settings = self.aggregated_app_settings()
|
||||
target_app_settings_dict = dict([(i.name, i.value) for i in target_app_settings])
|
||||
return target_app_settings_dict != source_app_settings, target_app_settings
|
||||
|
||||
def necessary_functionapp_settings(self):
|
||||
"""Construct the necessary app settings required for an Azure Function App"""
|
||||
|
||||
function_app_settings = []
|
||||
|
||||
if self.container_settings is None:
|
||||
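# non-container apps: point the v1 runtime at the storage account and a content share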
for key in ['AzureWebJobsStorage', 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', 'AzureWebJobsDashboard']:
|
||||
function_app_settings.append(NameValuePair(name=key, value=self.storage_connection_string))
|
||||
function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~1'))
|
||||
function_app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='6.5.0'))
|
||||
function_app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=self.name))
|
||||
else:
|
||||
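# container apps: use the v2 runtime and disable App Service storage so the container's own filesystem is used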
function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
|
||||
function_app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value=False))
|
||||
function_app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=self.storage_connection_string))
|
||||
|
||||
return function_app_settings
|
||||
|
||||
def aggregated_app_settings(self):
|
||||
"""Combine both system and user app settings"""
|
||||
|
||||
function_app_settings = self.necessary_functionapp_settings()
|
||||
for app_setting_key in self.app_settings:
|
||||
found_setting = None
|
||||
for s in function_app_settings:
|
||||
if s.name == app_setting_key:
|
||||
found_setting = s
|
||||
break
|
||||
if found_setting:
|
||||
found_setting.value = self.app_settings[app_setting_key]
|
||||
else:
|
||||
function_app_settings.append(NameValuePair(
|
||||
name=app_setting_key,
|
||||
value=self.app_settings[app_setting_key]
|
||||
))
|
||||
return function_app_settings
|
||||
|
||||
@property
|
||||
def storage_connection_string(self):
|
||||
"""Construct the storage account connection string"""
|
||||
|
||||
return 'DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}'.format(
|
||||
self.storage_account,
|
||||
self.storage_key
|
||||
)
|
||||
|
||||
@property
|
||||
def storage_key(self):
|
||||
"""Retrieve the storage account key"""
|
||||
|
||||
return self.storage_client.storage_accounts.list_keys(
|
||||
resource_group_name=self.resource_group,
|
||||
account_name=self.storage_account
|
||||
).keys[0].value
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function execution"""
|
||||
|
||||
AzureRMFunctionApp()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,206 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_functionapp_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Function App facts
|
||||
description:
|
||||
- Get facts for one Azure Function App or all Function Apps within a resource group.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Only show results for a specific Function App.
|
||||
resource_group:
|
||||
description:
|
||||
- Limit results to a resource group. Required when filtering by name.
|
||||
aliases:
|
||||
- resource_group_name
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Thomas Stringer (@trstringer)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for one Function App
|
||||
azure_rm_functionapp_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myfunctionapp
|
||||
|
||||
- name: Get facts for all Function Apps in a resource group
|
||||
azure_rm_functionapp_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts for all Function Apps by tags
|
||||
azure_rm_functionapp_info:
|
||||
tags:
|
||||
- testing
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_functionapps:
|
||||
description:
|
||||
- List of Azure Function Apps dicts.
|
||||
returned: always
|
||||
type: list
|
||||
example:
|
||||
id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp
|
||||
name: myfunctionapp
|
||||
kind: functionapp
|
||||
location: East US
|
||||
type: Microsoft.Web/sites
|
||||
state: Running
|
||||
host_names:
|
||||
- myfunctionapp.azurewebsites.net
|
||||
repository_site_name: myfunctionapp
|
||||
usage_state: Normal
|
||||
enabled: true
|
||||
enabled_host_names:
|
||||
- myfunctionapp.azurewebsites.net
|
||||
- myfunctionapp.scm.azurewebsites.net
|
||||
availability_state: Normal
|
||||
host_name_ssl_states:
|
||||
- name: myfunctionapp.azurewebsites.net
|
||||
ssl_state: Disabled
|
||||
host_type: Standard
|
||||
- name: myfunctionapp.scm.azurewebsites.net
|
||||
ssl_state: Disabled
|
||||
host_type: Repository
|
||||
server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan
|
||||
reserved: false
|
||||
last_modified_time_utc: 2017-08-22T18:54:01.190Z
|
||||
scm_site_also_stopped: false
|
||||
client_affinity_enabled: true
|
||||
client_cert_enabled: false
|
||||
host_names_disabled: false
|
||||
outbound_ip_addresses: ............
|
||||
container_size: 1536
|
||||
daily_memory_time_quota: 0
|
||||
resource_group: myResourceGroup
|
||||
default_host_name: myfunctionapp.azurewebsites.net
|
||||
'''
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
|
||||
class AzureRMFunctionAppInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str', aliases=['resource_group_name']),
|
||||
tags=dict(type='list'),
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
ansible_info=dict(azure_functionapps=[])
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMFunctionAppInfo, self).__init__(
|
||||
self.module_arg_spec,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_functionapp_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_functionapp_facts' module has been renamed to 'azure_rm_functionapp_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name and not self.resource_group:
|
||||
self.fail("Parameter error: resource group required when filtering by name.")
|
||||
|
||||
if self.name:
|
||||
self.results['ansible_info']['azure_functionapps'] = self.get_functionapp()
|
||||
elif self.resource_group:
|
||||
self.results['ansible_info']['azure_functionapps'] = self.list_resource_group()
|
||||
else:
|
||||
self.results['ansible_info']['azure_functionapps'] = self.list_all()
|
||||
|
||||
return self.results
|
||||
|
||||
def get_functionapp(self):
|
||||
self.log('Get properties for Function App {0}'.format(self.name))
|
||||
function_app = None
|
||||
result = []
|
||||
|
||||
try:
|
||||
function_app = self.web_client.web_apps.get(
|
||||
self.resource_group,
|
||||
self.name
|
||||
)
|
||||
except CloudError:
|
||||
pass
|
||||
|
||||
if function_app and self.has_tags(function_app.tags, self.tags):
|
||||
result = function_app.as_dict()
|
||||
|
||||
return [result]
|
||||
|
||||
def list_resource_group(self):
|
||||
self.log('List items')
|
||||
try:
|
||||
response = self.web_client.web_apps.list_by_resource_group(self.resource_group)
|
||||
except Exception as exc:
|
||||
self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(item.as_dict())
|
||||
return results
|
||||
|
||||
def list_all(self):
|
||||
self.log('List all items')
|
||||
try:
|
||||
            response = self.web_client.web_apps.list()
|
||||
except Exception as exc:
|
||||
self.fail("Error listing all items - {0}".format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(item.as_dict())
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMFunctionAppInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,308 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_gallery
|
||||
version_added: '2.9'
|
||||
short_description: Manage Azure Shared Image Gallery instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure Shared Image Gallery (SIG).
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: true
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the Shared Image Gallery.
|
||||
            - Valid names consist of fewer than 80 alphanumeric characters, underscores, and periods.
|
||||
required: true
|
||||
type: str
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- The description of this Shared Image Gallery resource. This property is updatable.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Gallery.
|
||||
            - Use C(present) to create or update a Gallery and C(absent) to delete it.
|
||||
default: present
|
||||
type: str
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create or update a simple gallery.
|
||||
azure_rm_gallery:
|
||||
resource_group: myResourceGroup
|
||||
name: myGallery1283
|
||||
location: West US
|
||||
description: This is the gallery description.
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery1283"
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
import re
|
||||
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
from copy import deepcopy
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMGalleries(AzureRMModuleBaseExt):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='resourceGroupName',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='galleryName',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='/'
|
||||
),
|
||||
description=dict(
|
||||
type='str',
|
||||
disposition='/properties/*'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
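        # Note: keys such as 'disposition', 'updatable' and 'pattern' in the spec above are not
        # standard Ansible argument-spec options; they are extra metadata consumed by the
        # AzureRMModuleBaseExt helpers (inflate_parameters and create_compare_modifiers) to map
        # flat module parameters into the nested REST request body and to drive idempotence checks.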
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.gallery = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.url = None
|
||||
self.status_code = [200, 201, 202]
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
self.body = {}
|
||||
self.query_parameters = {}
|
||||
self.query_parameters['api-version'] = '2019-07-01'
|
||||
self.header_parameters = {}
|
||||
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
|
||||
|
||||
super(AzureRMGalleries, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.body[key] = kwargs[key]
|
||||
|
||||
self.inflate_parameters(self.module_arg_spec, self.body, 0)
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if 'location' not in self.body:
|
||||
self.body['location'] = resource_group.location
|
||||
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries' +
|
||||
'/{{ gallery_name }}')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ gallery_name }}', self.name)
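        # After substitution the request URL has the form
        #   /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/galleries/<name>
        # and is issued with the api-version set in self.query_parameters above.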
|
||||
|
||||
old_response = self.get_resource()
|
||||
|
||||
if not old_response:
|
||||
self.log("Gallery instance doesn't exist")
|
||||
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log('Gallery instance already exists')
|
||||
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
else:
|
||||
modifiers = {}
|
||||
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
|
||||
self.results['modifiers'] = modifiers
|
||||
self.results['compare'] = []
|
||||
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
|
||||
self.to_do = Actions.Update
|
||||
self.body['properties'].pop('identifier', None)
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log('Need to Create / Update the Gallery instance')
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_resource()
|
||||
|
||||
# if not old_response:
|
||||
self.results['changed'] = True
|
||||
# else:
|
||||
# self.results['changed'] = old_response.__ne__(response)
|
||||
self.log('Creation / Update done')
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log('Gallery instance deleted')
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_resource()
|
||||
|
||||
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
|
||||
# for some time after deletion -- this should be really fixed in Azure
|
||||
while self.get_resource():
|
||||
time.sleep(20)
|
||||
else:
|
||||
self.log('Gallery instance unchanged')
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_resource(self):
|
||||
# self.log('Creating / Updating the Gallery instance {0}'.format(self.))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'PUT',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
self.body,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Gallery instance.')
|
||||
self.fail('Error creating the Gallery instance: {0}'.format(str(exc)))
|
||||
|
||||
try:
|
||||
response = json.loads(response.text)
|
||||
except Exception:
|
||||
response = {'text': response.text}
|
||||
|
||||
return response
|
||||
|
||||
def delete_resource(self):
|
||||
# self.log('Deleting the Gallery instance {0}'.format(self.))
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'DELETE',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Gallery instance.')
|
||||
self.fail('Error deleting the Gallery instance: {0}'.format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_resource(self):
|
||||
# self.log('Checking if the Gallery instance {0} is present'.format(self.))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
response = json.loads(response.text)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
            # self.log("Gallery instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the Gallery instance.')
|
||||
if found is True:
|
||||
return response
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMGalleries()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,263 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Liu Qingyi, (@smile37773)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_gallery_info
|
||||
version_added: '2.9'
|
||||
short_description: Get Azure Shared Image Gallery info
|
||||
description:
|
||||
- Get info of Azure Shared Image Gallery.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Resource name
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
author:
|
||||
- Liu Qingyi (@smile37773)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List galleries in a subscription.
|
||||
azure_rm_gallery_info:
|
||||
- name: List galleries in a resource group.
|
||||
azure_rm_gallery_info:
|
||||
resource_group: myResourceGroup
|
||||
- name: Get a gallery.
|
||||
azure_rm_gallery_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myGallery
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
galleries:
|
||||
description:
|
||||
- A list of dict results where the key is the name of the gallery and the values are the info for that gallery.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery"
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "myGallery"
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "eastus"
|
||||
tags:
|
||||
description:
|
||||
- Resource tags.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { "tag": "value" }
|
||||
description:
|
||||
description:
|
||||
- This is the gallery description.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "This is the gallery description."
|
||||
provisioning_state:
|
||||
description:
|
||||
- The current state of the gallery.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Succeeded"
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
from copy import deepcopy
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMGalleriesInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str'
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.url = None
|
||||
self.status_code = [200]
|
||||
|
||||
self.query_parameters = {}
|
||||
self.query_parameters['api-version'] = '2019-03-01'
|
||||
self.header_parameters = {}
|
||||
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
|
||||
|
||||
self.mgmt_client = None
|
||||
super(AzureRMGalleriesInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if (self.resource_group is not None and self.name is not None):
|
||||
# self.results['galleries'] = self.format_item(self.get())
|
||||
self.results['galleries'] = self.get()
|
||||
elif (self.resource_group is not None):
|
||||
# self.results['galleries'] = self.format_item(self.listbyresourcegroup())
|
||||
self.results['galleries'] = self.listbyresourcegroup()
|
||||
else:
|
||||
# self.results['galleries'] = [self.format_item(self.list())]
|
||||
self.results['galleries'] = self.list()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries' +
|
||||
'/{{ gallery_name }}')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ gallery_name }}', self.name)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
            self.log('Could not get info for the gallery.')
|
||||
|
||||
return self.format_item(results)
|
||||
|
||||
def listbyresourcegroup(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
            self.log('Could not list galleries in the resource group.')
|
||||
|
||||
return [self.format_item(x) for x in results['value']] if results['value'] else []
|
||||
|
||||
def list(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
            self.log('Could not list galleries in the subscription.')
|
||||
|
||||
return [self.format_item(x) for x in results['value']] if results['value'] else []
|
||||
|
||||
def format_item(self, item):
|
||||
d = {
|
||||
'id': item['id'],
|
||||
'name': item['name'],
|
||||
'location': item['location'],
|
||||
'tags': item.get('tags'),
|
||||
'description': item['properties']['description'],
|
||||
'provisioning_state': item['properties']['provisioningState']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMGalleriesInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,544 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_galleryimage
|
||||
version_added: '2.9'
|
||||
short_description: Manage Azure SIG Image instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure SIG Image.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: true
|
||||
type: str
|
||||
gallery_name:
|
||||
description:
|
||||
- The name of the Shared Image Gallery in which the Image Definition is to be created.
|
||||
required: true
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the gallery Image Definition to be created or updated.
|
||||
            - The allowed characters are letters and numbers, with dots, dashes, and periods allowed in the middle.
|
||||
- The maximum length is 80 characters.
|
||||
required: true
|
||||
type: str
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- The description of this gallery Image Definition resource. This property is updatable.
|
||||
type: str
|
||||
eula:
|
||||
description:
|
||||
- The Eula agreement for the gallery Image Definition.
|
||||
type: str
|
||||
privacy_statement_uri:
|
||||
description:
|
||||
- The privacy statement uri.
|
||||
type: str
|
||||
release_note_uri:
|
||||
description:
|
||||
- The release note uri.
|
||||
type: str
|
||||
os_type:
|
||||
description:
|
||||
- This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
|
||||
choices:
|
||||
- windows
|
||||
- linux
|
||||
required: true
|
||||
type: str
|
||||
os_state:
|
||||
description:
|
||||
            - The allowed values for OS State are C(generalized) and C(specialized).
|
||||
choices:
|
||||
- generalized
|
||||
- specialized
|
||||
required: true
|
||||
type: str
|
||||
end_of_life_date:
|
||||
description:
|
||||
- The end of life date of the gallery Image Definition.
|
||||
- This property can be used for decommissioning purposes.
|
||||
- This property is updatable.
|
||||
- Format should be according to ISO-8601, for instance "2019-06-26".
|
||||
type: str
|
||||
identifier:
|
||||
description:
|
||||
- Image identifier.
|
||||
required: true
|
||||
type: dict
|
||||
suboptions:
|
||||
publisher:
|
||||
description:
|
||||
- The name of the gallery Image Definition publisher.
|
||||
required: true
|
||||
type: str
|
||||
offer:
|
||||
description:
|
||||
- The name of the gallery Image Definition offer.
|
||||
required: true
|
||||
type: str
|
||||
sku:
|
||||
description:
|
||||
- The name of the gallery Image Definition SKU.
|
||||
required: true
|
||||
type: str
|
||||
recommended:
|
||||
description:
|
||||
- Recommended parameter values.
|
||||
type: dict
|
||||
suboptions:
|
||||
v_cpus:
|
||||
description:
|
||||
- Number of virtual CPUs.
|
||||
type: dict
|
||||
suboptions:
|
||||
min:
|
||||
description:
|
||||
- The minimum number of the resource.
|
||||
type: int
|
||||
max:
|
||||
description:
|
||||
- The maximum number of the resource.
|
||||
type: int
|
||||
memory:
|
||||
description:
|
||||
- Memory.
|
||||
type: dict
|
||||
suboptions:
|
||||
min:
|
||||
description:
|
||||
- The minimum number of the resource.
|
||||
type: int
|
||||
max:
|
||||
description:
|
||||
- The maximum number of the resource.
|
||||
type: int
|
||||
disallowed:
|
||||
description:
|
||||
- Disallowed parameter values.
|
||||
type: dict
|
||||
suboptions:
|
||||
disk_types:
|
||||
description:
|
||||
- A list of disallowed disk types.
|
||||
type: list
|
||||
purchase_plan:
|
||||
description:
|
||||
- Purchase plan.
|
||||
type: dict
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The plan ID.
|
||||
type: str
|
||||
publisher:
|
||||
description:
|
||||
- The publisher ID.
|
||||
type: str
|
||||
product:
|
||||
description:
|
||||
- The product ID.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the GalleryImage.
|
||||
            - Use C(present) to create or update a GalleryImage and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create or update gallery image
|
||||
azure_rm_galleryimage:
|
||||
resource_group: myResourceGroup
|
||||
gallery_name: myGallery1283
|
||||
name: myImage
|
||||
location: West US
|
||||
os_type: linux
|
||||
os_state: generalized
|
||||
identifier:
|
||||
publisher: myPublisherName
|
||||
offer: myOfferName
|
||||
sku: mySkuName
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery1283/images/myImage"
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
import re
|
||||
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
from copy import deepcopy
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMGalleryImages(AzureRMModuleBaseExt):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='resourceGroupName',
|
||||
required=True
|
||||
),
|
||||
gallery_name=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='galleryName',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='galleryImageName',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='/'
|
||||
),
|
||||
description=dict(
|
||||
type='str',
|
||||
disposition='/properties/*'
|
||||
),
|
||||
eula=dict(
|
||||
type='str',
|
||||
disposition='/properties/*'
|
||||
),
|
||||
privacy_statement_uri=dict(
|
||||
type='str',
|
||||
disposition='/properties/privacyStatementUri'
|
||||
),
|
||||
release_note_uri=dict(
|
||||
type='str',
|
||||
disposition='/properties/releaseNoteUri'
|
||||
),
|
||||
os_type=dict(
|
||||
type='str',
|
||||
disposition='/properties/osType',
|
||||
choices=['windows',
|
||||
'linux']
|
||||
),
|
||||
os_state=dict(
|
||||
type='str',
|
||||
disposition='/properties/osState',
|
||||
choices=['generalized',
|
||||
'specialized']
|
||||
),
|
||||
end_of_life_date=dict(
|
||||
type='str',
|
||||
disposition='/properties/endOfLifeDate'
|
||||
),
|
||||
identifier=dict(
|
||||
type='dict',
|
||||
disposition='/properties/*',
|
||||
options=dict(
|
||||
publisher=dict(
|
||||
type='str',
|
||||
required=True,
|
||||
updatable=False
|
||||
),
|
||||
offer=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
sku=dict(
|
||||
type='str',
|
||||
required=True
|
||||
)
|
||||
)
|
||||
),
|
||||
recommended=dict(
|
||||
type='dict',
|
||||
disposition='/properties/*',
|
||||
options=dict(
|
||||
v_cpus=dict(
|
||||
type='dict',
|
||||
disposition='vCPUs',
|
||||
options=dict(
|
||||
min=dict(
|
||||
type='int'
|
||||
),
|
||||
max=dict(
|
||||
type='int'
|
||||
)
|
||||
)
|
||||
),
|
||||
memory=dict(
|
||||
type='dict',
|
||||
options=dict(
|
||||
min=dict(
|
||||
type='int'
|
||||
),
|
||||
max=dict(
|
||||
type='int'
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
),
|
||||
disallowed=dict(
|
||||
type='dict',
|
||||
disposition='/properties/*',
|
||||
options=dict(
|
||||
disk_types=dict(
|
||||
type='list',
|
||||
disposition='diskTypes'
|
||||
)
|
||||
)
|
||||
),
|
||||
purchase_plan=dict(
|
||||
type='dict',
|
||||
disposition='/properties/purchasePlan',
|
||||
options=dict(
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
publisher=dict(
|
||||
type='str'
|
||||
),
|
||||
product=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.gallery_name = None
|
||||
self.name = None
|
||||
self.gallery_image = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.url = None
|
||||
self.status_code = [200, 201, 202]
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
self.body = {}
|
||||
self.query_parameters = {}
|
||||
self.query_parameters['api-version'] = '2019-07-01'
|
||||
self.header_parameters = {}
|
||||
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
|
||||
|
||||
super(AzureRMGalleryImages, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.body[key] = kwargs[key]
|
||||
|
||||
self.inflate_parameters(self.module_arg_spec, self.body, 0)
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if 'location' not in self.body:
|
||||
self.body['location'] = resource_group.location
|
||||
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries' +
|
||||
'/{{ gallery_name }}' +
|
||||
'/images' +
|
||||
'/{{ image_name }}')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
|
||||
self.url = self.url.replace('{{ image_name }}', self.name)
|
||||
|
||||
old_response = self.get_resource()
|
||||
|
||||
if not old_response:
|
||||
self.log("GalleryImage instance doesn't exist")
|
||||
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log('GalleryImage instance already exists')
|
||||
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
else:
|
||||
modifiers = {}
|
||||
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
|
||||
self.results['modifiers'] = modifiers
|
||||
self.results['compare'] = []
|
||||
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log('Need to Create / Update the GalleryImage instance')
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_resource()
|
||||
|
||||
# if not old_response:
|
||||
self.results['changed'] = True
|
||||
# else:
|
||||
# self.results['changed'] = old_response.__ne__(response)
|
||||
self.log('Creation / Update done')
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log('GalleryImage instance deleted')
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_resource()
|
||||
|
||||
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
|
||||
# for some time after deletion -- this should be really fixed in Azure
|
||||
while self.get_resource():
|
||||
time.sleep(20)
|
||||
else:
|
||||
self.log('GalleryImage instance unchanged')
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_resource(self):
|
||||
# self.log('Creating / Updating the GalleryImage instance {0}'.format(self.))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'PUT',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
self.body,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the GalleryImage instance.')
|
||||
self.fail('Error creating the GalleryImage instance: {0}'.format(str(exc)))
|
||||
|
||||
try:
|
||||
response = json.loads(response.text)
|
||||
except Exception:
|
||||
response = {'text': response.text}
|
||||
|
||||
return response
|
||||
|
||||
def delete_resource(self):
|
||||
# self.log('Deleting the GalleryImage instance {0}'.format(self.))
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'DELETE',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the GalleryImage instance.')
|
||||
self.fail('Error deleting the GalleryImage instance: {0}'.format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_resource(self):
|
||||
# self.log('Checking if the GalleryImage instance {0} is present'.format(self.))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
response = json.loads(response.text)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
            # self.log("GalleryImage instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the GalleryImage instance.')
|
||||
if found is True:
|
||||
return response
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMGalleryImages()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,274 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Liu Qingyi, (@smile37773)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_galleryimage_info
|
||||
version_added: '2.9'
|
||||
short_description: Get Azure SIG Image info
|
||||
description:
|
||||
- Get info of Azure SIG Image.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
type: str
|
||||
required: true
|
||||
gallery_name:
|
||||
description:
|
||||
- The name of the shared image gallery from which the image definitions are to be retrieved.
|
||||
type: str
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
author:
|
||||
- Liu Qingyi (@smile37773)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List gallery images in a gallery.
|
||||
azure_rm_galleryimage_info:
|
||||
resource_group: myResourceGroup
|
||||
gallery_name: myGallery
|
||||
- name: Get a gallery image.
|
||||
azure_rm_galleryimage_info:
|
||||
resource_group: myResourceGroup
|
||||
gallery_name: myGallery
|
||||
name: myImage
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
images:
|
||||
description:
|
||||
- A list of dict results where the key is the name of the image and the values are the info for that image.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery/images/myImage"
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myImage
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "eastus"
|
||||
tags:
|
||||
description:
|
||||
- Resource tags.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { "tag": "value" }
|
||||
os_state:
|
||||
description:
|
||||
                - The allowed values for OS State are C(generalized) and C(specialized).
|
||||
            type: str
|
||||
sample: "Generalized"
|
||||
os_type:
|
||||
description:
|
||||
- This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
|
||||
            type: str
|
||||
sample: "linux/windows"
|
||||
identifier:
|
||||
description:
|
||||
- This is the gallery image definition identifier.
|
||||
type: dict
|
||||
contains:
|
||||
offer:
|
||||
description:
|
||||
- The name of the gallery image definition offer.
|
||||
type: str
|
||||
sample: "myOfferName"
|
||||
publisher:
|
||||
description:
|
||||
- The name of the gallery image definition publisher.
|
||||
type: str
|
||||
sample: "myPublisherName"
|
||||
sku:
|
||||
description:
|
||||
- The name of the gallery image definition sku.
|
||||
type: str
|
||||
sample: "mySkuName"
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
from copy import deepcopy
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMGalleryImagesInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
gallery_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.gallery_name = None
|
||||
self.name = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.url = None
|
||||
self.status_code = [200]
|
||||
|
||||
self.query_parameters = {}
|
||||
self.query_parameters['api-version'] = '2019-03-01'
|
||||
self.header_parameters = {}
|
||||
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
|
||||
|
||||
self.mgmt_client = None
|
||||
super(AzureRMGalleryImagesInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if (self.resource_group is not None and
|
||||
self.gallery_name is not None and
|
||||
self.name is not None):
|
||||
# self.results['gallery_images'] = self.format_item(self.get())
|
||||
self.results['images'] = self.get()
|
||||
elif (self.resource_group is not None and
|
||||
self.gallery_name is not None):
|
||||
# self.results['gallery_images'] = self.format_item(self.listbygallery())
|
||||
self.results['images'] = self.listbygallery()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries' +
|
||||
'/{{ gallery_name }}' +
|
||||
'/images' +
|
||||
'/{{ image_name }}')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
|
||||
self.url = self.url.replace('{{ image_name }}', self.name)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
            self.log('Could not get info for the gallery image.')
|
||||
|
||||
return self.format_item(results)
|
||||
|
||||
def listbygallery(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries' +
|
||||
'/{{ gallery_name }}' +
|
||||
'/images')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
            self.log('Could not list images in the gallery.')
|
||||
|
||||
return [self.format_item(x) for x in results['value']] if results['value'] else []
|
||||
|
||||
def format_item(self, item):
|
||||
d = {
|
||||
'id': item['id'],
|
||||
'name': item['name'],
|
||||
'location': item['location'],
|
||||
'tags': item.get('tags'),
|
||||
'os_state': item['properties']['osState'],
|
||||
'os_type': item['properties']['osType'],
|
||||
'identifier': item['properties']['identifier']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMGalleryImagesInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,629 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_galleryimageversion
|
||||
version_added: '2.9'
|
||||
short_description: Manage Azure SIG Image Version instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure SIG Image Version.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: true
|
||||
type: str
|
||||
gallery_name:
|
||||
description:
|
||||
- The name of the Shared Image Gallery in which the Image Definition resides.
|
||||
required: true
|
||||
type: str
|
||||
gallery_image_name:
|
||||
description:
|
||||
- The name of the gallery Image Definition in which the Image Version is to be created.
|
||||
required: true
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the gallery Image Version to be created.
|
||||
            - Needs to follow the semantic version name pattern; the allowed characters are digits and periods.
|
||||
- Digits must be within the range of a 32-bit integer. For example <MajorVersion>.<MinorVersion>.<Patch>.
|
||||
required: true
|
||||
type: str
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
type: str
|
||||
storage_profile:
|
||||
description:
|
||||
- Storage profile
|
||||
required: true
|
||||
version_added: "2.10"
|
||||
type: dict
|
||||
suboptions:
|
||||
source_image:
|
||||
description:
|
||||
- Reference to managed image or gallery image version
|
||||
- Could be resource ID to managed image, or dictionary containing I(resource_group) and I(name)
|
||||
- Could be resource ID to image version, or dictionary containing I(resource_group),I(gallery_name), I(gallery_image_name) and I(version)
|
||||
                    - Mutually exclusive with os_disk and data_disks
|
||||
type: raw
|
||||
os_disk:
|
||||
description:
|
||||
- os disk snapshot
|
||||
                    - Mutually exclusive with source_image
|
||||
type: raw
|
||||
suboptions:
|
||||
source:
|
||||
description:
|
||||
- Reference to os disk snapshot. Could be resource ID or dictionary containing I(resource_group) and I(name)
|
||||
type: str
|
||||
host_caching:
|
||||
description:
|
||||
- host disk caching
|
||||
type: str
|
||||
default: None
|
||||
choices:
|
||||
- None
|
||||
- ReadOnly
|
||||
- ReadWrite
|
||||
data_disks:
|
||||
description:
|
||||
- list of data disk snapshot
|
||||
                    - Mutually exclusive with source_image
|
||||
type: list
|
||||
suboptions:
|
||||
source:
|
||||
description:
|
||||
- Reference to data disk snapshot. Could be resource ID or dictionary containing I(resource_group) and I(name)
|
||||
type: str
|
||||
lun:
|
||||
description:
|
||||
- lun of the data disk
|
||||
type: int
|
||||
host_caching:
|
||||
description:
|
||||
- host disk caching
|
||||
type: str
|
||||
default: None
|
||||
choices:
|
||||
- None
|
||||
- ReadOnly
|
||||
- ReadWrite
|
||||
publishing_profile:
|
||||
description:
|
||||
- Publishing profile.
|
||||
required: true
|
||||
type: dict
|
||||
suboptions:
|
||||
target_regions:
|
||||
description:
|
||||
- The target regions where the Image Version is going to be replicated to.
|
||||
- This property is updatable.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Region name.
|
||||
type: str
|
||||
regional_replica_count:
|
||||
description:
|
||||
- The number of replicas of the Image Version to be created per region.
|
||||
- This property would take effect for a region when regionalReplicaCount is not specified.
|
||||
- This property is updatable.
|
||||
                        type: int
|
||||
storage_account_type:
|
||||
description:
|
||||
- Storage account type.
|
||||
type: str
|
||||
managed_image:
|
||||
description:
|
||||
- Managed image reference, could be resource ID, or dictionary containing I(resource_group) and I(name)
|
||||
- Obsolete since 2.10, use storage_profile instead
|
||||
snapshot:
|
||||
description:
|
||||
- Source snapshot to be used.
|
||||
- Obsolete since 2.10, use storage_profile instead
|
||||
replica_count:
|
||||
description:
|
||||
- The number of replicas of the Image Version to be created per region.
|
||||
- This property would take effect for a region when regionalReplicaCount is not specified.
|
||||
- This property is updatable.
|
||||
type: int
|
||||
exclude_from_latest:
|
||||
description:
|
||||
If I(exclude_from_latest=true), Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
|
||||
type: bool
|
||||
end_of_life_date:
|
||||
description:
|
||||
- The end of life date of the gallery Image Version.
|
||||
- This property can be used for decommissioning purposes.
|
||||
- This property is updatable. Format should be according to ISO-8601, for instance "2019-06-26".
|
||||
type: str
|
||||
storage_account_type:
|
||||
description:
|
||||
- Specifies the storage account type to be used to store the image.
|
||||
- This property is not updatable.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the GalleryImageVersion.
|
||||
            - Use C(present) to create or update a GalleryImageVersion and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a gallery image version from a managed image
|
||||
azure_rm_galleryimageversion:
|
||||
resource_group: myResourceGroup
|
||||
gallery_name: myGallery
|
||||
gallery_image_name: myGalleryImage
|
||||
name: 1.1.0
|
||||
location: East US
|
||||
publishing_profile:
|
||||
end_of_life_date: "2020-10-01t00:00:00+00:00"
|
||||
exclude_from_latest: yes
|
||||
replica_count: 4
|
||||
storage_account_type: Standard_LRS
|
||||
target_regions:
|
||||
- name: West US
|
||||
regional_replica_count: 1
|
||||
- name: East US
|
||||
regional_replica_count: 3
|
||||
storage_account_type: Standard_LRS
|
||||
storage_profile:
|
||||
source_image: /subscriptions/sub123/resourceGroups/group123/providers/Microsoft.Compute/images/myOsImage
|
||||
|
||||
- name: Create a gallery image version from another gallery image version
|
||||
azure_rm_galleryimageversion:
|
||||
resource_group: myResourceGroup
|
||||
gallery_name: myGallery
|
||||
gallery_image_name: myGalleryImage
|
||||
name: 1.2.0
|
||||
location: East US
|
||||
publishing_profile:
|
||||
end_of_life_date: "2020-10-01t00:00:00+00:00"
|
||||
exclude_from_latest: yes
|
||||
replica_count: 4
|
||||
storage_account_type: Standard_LRS
|
||||
target_regions:
|
||||
- name: West US
|
||||
regional_replica_count: 1
|
||||
- name: East US
|
||||
regional_replica_count: 3
|
||||
storage_account_type: Standard_LRS
|
||||
storage_profile:
|
||||
source_image:
|
||||
version: 1.1.0
|
||||
gallery_name: myGallery2
|
||||
gallery_image_name: myGalleryImage2
|
||||
|
||||
- name: Create a gallery image version by using one OS disk snapshot and zero or more data disk snapshots
|
||||
azure_rm_galleryimageversion:
|
||||
    resource_group: myResourceGroup
|
||||
gallery_name: myGallery
|
||||
gallery_image_name: myGalleryImage
|
||||
name: 3.4.0
|
||||
location: East US
|
||||
publishing_profile:
|
||||
end_of_life_date: "2020-10-01t00:00:00+00:00"
|
||||
exclude_from_latest: yes
|
||||
replica_count: 1
|
||||
storage_account_type: Standard_LRS
|
||||
target_regions:
|
||||
- name: East US
|
||||
regional_replica_count: 1
|
||||
storage_account_type: Standard_LRS
|
||||
storage_profile:
|
||||
os_disk:
|
||||
source: "/subscriptions/mySub/resourceGroups/myGroup/providers/Microsoft.Compute/snapshots/os_snapshot_vma"
|
||||
data_disks:
|
||||
- lun: 0
|
||||
source:
|
||||
name: data_snapshot_vma
|
||||
- lun: 1
|
||||
source: "/subscriptions/mySub/resourceGroups/myGroup/providers/Microsoft.Compute/snapshots/data_snapshot_vmb"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery1283/images/myImage/versions/10.1.3"
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMGalleryImageVersions(AzureRMModuleBaseExt):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='resourceGroupName',
|
||||
required=True
|
||||
),
|
||||
gallery_name=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='galleryName',
|
||||
required=True
|
||||
),
|
||||
gallery_image_name=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='galleryImageName',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='galleryImageVersionName',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str',
|
||||
updatable=False,
|
||||
disposition='/',
|
||||
comparison='location'
|
||||
),
|
||||
storage_profile=dict(
|
||||
type='dict',
|
||||
updatable=False,
|
||||
disposition='/properties/storageProfile',
|
||||
comparison='ignore',
|
||||
options=dict(
|
||||
source_image=dict(
|
||||
type='raw',
|
||||
disposition='source/id',
|
||||
purgeIfNone=True,
|
||||
pattern=[('/subscriptions/{subscription_id}/resourceGroups'
|
||||
'/{resource_group}/providers/Microsoft.Compute'
|
||||
'/images/{name}'),
|
||||
('/subscriptions/{subscription_id}/resourceGroups'
|
||||
'/{resource_group}/providers/Microsoft.Compute'
|
||||
'/galleries/{gallery_name}/images/{gallery_image_name}'
|
||||
'/versions/{version}')]
|
||||
),
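                    # The 'pattern' templates above let normalize_resource_id() in
                    # AzureRMModuleBaseExt expand a bare image name or a partial dict into a
                    # full ARM resource ID before it is placed at source/id in the request body.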
|
||||
os_disk=dict(
|
||||
type='dict',
|
||||
disposition='osDiskImage',
|
||||
purgeIfNone=True,
|
||||
comparison='ignore',
|
||||
options=dict(
|
||||
source=dict(
|
||||
type='raw',
|
||||
disposition='source/id',
|
||||
pattern=('/subscriptions/{subscription_id}/resourceGroups'
|
||||
'/{resource_group}/providers/Microsoft.Compute'
|
||||
'/snapshots/{name}')
|
||||
),
|
||||
host_caching=dict(
|
||||
type='str',
|
||||
disposition='hostCaching',
|
||||
default="None",
|
||||
choices=["ReadOnly", "ReadWrite", "None"]
|
||||
)
|
||||
)
|
||||
),
|
||||
data_disks=dict(
|
||||
type='list',
|
||||
disposition='dataDiskImages',
|
||||
purgeIfNone=True,
|
||||
options=dict(
|
||||
lun=dict(
|
||||
type='int'
|
||||
),
|
||||
source=dict(
|
||||
type='raw',
|
||||
disposition="source/id",
|
||||
pattern=('/subscriptions/{subscription_id}/resourceGroups'
|
||||
'/{resource_group}/providers/Microsoft.Compute'
|
||||
'/snapshots/{name}')
|
||||
),
|
||||
host_caching=dict(
|
||||
type='str',
|
||||
disposition='hostCaching',
|
||||
default="None",
|
||||
choices=["ReadOnly", "ReadWrite", "None"]
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
),
|
||||
publishing_profile=dict(
|
||||
type='dict',
|
||||
disposition='/properties/publishingProfile',
|
||||
options=dict(
|
||||
target_regions=dict(
|
||||
type='list',
|
||||
disposition='targetRegions',
|
||||
options=dict(
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True,
|
||||
comparison='location'
|
||||
),
|
||||
regional_replica_count=dict(
|
||||
type='int',
|
||||
disposition='regionalReplicaCount'
|
||||
),
|
||||
storage_account_type=dict(
|
||||
type='str',
|
||||
disposition='storageAccountType'
|
||||
)
|
||||
)
|
||||
),
|
||||
managed_image=dict(
|
||||
type='raw',
|
||||
pattern=('/subscriptions/{subscription_id}/resourceGroups'
|
||||
'/{resource_group}/providers/Microsoft.Compute'
|
||||
'/images/{name}'),
|
||||
comparison='ignore'
|
||||
),
|
||||
snapshot=dict(
|
||||
type='raw',
|
||||
pattern=('/subscriptions/{subscription_id}/resourceGroups'
|
||||
'/{resource_group}/providers/Microsoft.Compute'
|
||||
'/snapshots/{name}'),
|
||||
comparison='ignore'
|
||||
),
|
||||
replica_count=dict(
|
||||
type='int',
|
||||
disposition='replicaCount'
|
||||
),
|
||||
exclude_from_latest=dict(
|
||||
type='bool',
|
||||
disposition='excludeFromLatest'
|
||||
),
|
||||
end_of_life_date=dict(
|
||||
type='str',
|
||||
disposition='endOfLifeDate'
|
||||
),
|
||||
storage_account_type=dict(
|
||||
type='str',
|
||||
disposition='storageAccountType',
|
||||
choices=['Standard_LRS',
|
||||
'Standard_ZRS']
|
||||
)
|
||||
)
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.gallery_name = None
|
||||
self.gallery_image_name = None
|
||||
self.name = None
|
||||
self.gallery_image_version = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.url = None
|
||||
self.status_code = [200, 201, 202]
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
self.body = {}
|
||||
self.query_parameters = {}
|
||||
self.query_parameters['api-version'] = '2019-07-01'
|
||||
self.header_parameters = {}
|
||||
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
|
||||
|
||||
super(AzureRMGalleryImageVersions, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.body[key] = kwargs[key]
|
||||
|
||||
self.inflate_parameters(self.module_arg_spec, self.body, 0)
|
||||
|
||||
# keep backward compatibility
|
||||
snapshot = self.body.get('properties', {}).get('publishingProfile', {}).pop('snapshot', None)
|
||||
if snapshot is not None:
|
||||
self.body['properties'].setdefault('storageProfile', {}).setdefault('osDiskImage', {}).setdefault('source', {})['id'] = snapshot
|
||||
managed_image = self.body.get('properties', {}).get('publishingProfile', {}).pop('managed_image', None)
|
||||
if managed_image:
|
||||
self.body['properties'].setdefault('storageProfile', {}).setdefault('source', {})['id'] = managed_image
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if 'location' not in self.body:
|
||||
self.body['location'] = resource_group.location
|
||||
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries' +
|
||||
'/{{ gallery_name }}' +
|
||||
'/images' +
|
||||
'/{{ image_name }}' +
|
||||
'/versions' +
|
||||
'/{{ version_name }}')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
|
||||
self.url = self.url.replace('{{ image_name }}', self.gallery_image_name)
|
||||
self.url = self.url.replace('{{ version_name }}', self.name)
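# Resulting URL, e.g.:
# /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute
#     /galleries/<gallery_name>/images/<gallery_image_name>/versions/<version_name>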
|
||||
|
||||
old_response = self.get_resource()
|
||||
|
||||
if not old_response:
|
||||
self.log("GalleryImageVersion instance doesn't exist")
|
||||
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log('GalleryImageVersion instance already exists')
|
||||
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
else:
|
||||
modifiers = {}
|
||||
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
|
||||
self.results['modifiers'] = modifiers
|
||||
self.results['compare'] = []
|
||||
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log('Need to Create / Update the GalleryImageVersion instance')
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_resource()
|
||||
|
||||
self.results['changed'] = True
|
||||
self.log('Creation / Update done')
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log('GalleryImageVersion instance deleted')
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_resource()
|
||||
else:
|
||||
self.log('GalleryImageVersion instance unchanged')
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_resource(self):
|
||||
# self.log('Creating / Updating the GalleryImageVersion instance {0}'.format(self.))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'PUT',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
self.body,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the GalleryImageVersion instance.')
|
||||
self.fail('Error creating the GalleryImageVersion instance: {0}'.format(str(exc)))
|
||||
|
||||
try:
|
||||
response = json.loads(response.text)
|
||||
except Exception:
|
||||
response = {'text': response.text}
|
||||
|
||||
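# The PUT may return while provisioning is still running, so poll the resource
# every 60 seconds until provisioningState moves past 'Creating'.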
while response['properties']['provisioningState'] == 'Creating':
|
||||
time.sleep(60)
|
||||
response = self.get_resource()
|
||||
|
||||
return response
|
||||
|
||||
def delete_resource(self):
|
||||
# self.log('Deleting the GalleryImageVersion instance {0}'.format(self.))
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'DELETE',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the GalleryImageVersion instance.')
|
||||
self.fail('Error deleting the GalleryImageVersion instance: {0}'.format(str(e)))
|
||||
return True
|
||||
|
||||
def get_resource(self):
|
||||
# self.log('Checking if the GalleryImageVersion instance {0} is present'.format(self.))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
response = json.loads(response.text)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
# self.log("AzureFirewall instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the GalleryImageVersion instance.')
if found is True:
|
||||
return response
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMGalleryImageVersions()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,270 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Liu Qingyi, (@smile37773)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_galleryimageversion_info
|
||||
version_added: '2.9'
|
||||
short_description: Get Azure SIG Image Version info
|
||||
description:
|
||||
- Get info of Azure SIG Image Version.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
type: str
|
||||
required: true
|
||||
gallery_name:
|
||||
description:
|
||||
- The name of the Shared Image Gallery in which the Image Definition resides.
|
||||
type: str
|
||||
required: true
|
||||
gallery_image_name:
|
||||
description:
|
||||
- The name of the gallery Image Definition in which the Image Version resides.
|
||||
type: str
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
author:
|
||||
- Liu Qingyi (@smile37773)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List gallery image versions in a gallery image definition.
|
||||
azure_rm_galleryimageversion_info:
|
||||
resource_group: myResourceGroup
|
||||
gallery_name: myGallery
|
||||
gallery_image_name: myImage
|
||||
- name: Get a gallery image version.
|
||||
azure_rm_galleryimageversion_info:
|
||||
resource_group: myResourceGroup
|
||||
gallery_name: myGallery
|
||||
gallery_image_name: myImage
|
||||
name: myVersion
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
versions:
|
||||
description:
|
||||
- A list of dict results where the key is the name of the version and the values are the info for that version.
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups
|
||||
/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery/images/myImage/versions/myVersion"
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "myVersion"
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "eastus"
|
||||
tags:
|
||||
description:
|
||||
- Resource tags.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: { "tag": "value" }
|
||||
publishing_profile:
|
||||
description:
|
||||
- The publishing profile of a gallery image version.
|
||||
type: dict
|
||||
provisioning_state:
|
||||
description:
|
||||
- The current state of the gallery.
|
||||
type: str
|
||||
sample: "Succeeded"
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
from copy import deepcopy
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMGalleryImageVersionsInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
gallery_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
gallery_image_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.gallery_name = None
|
||||
self.gallery_image_name = None
|
||||
self.name = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.url = None
|
||||
self.status_code = [200]
|
||||
|
||||
self.query_parameters = {}
|
||||
self.query_parameters['api-version'] = '2019-03-01'
|
||||
self.header_parameters = {}
|
||||
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
|
||||
|
||||
self.mgmt_client = None
|
||||
super(AzureRMGalleryImageVersionsInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if (self.resource_group is not None and
|
||||
self.gallery_name is not None and
|
||||
self.gallery_image_name is not None and
|
||||
self.name is not None):
|
||||
self.results['versions'] = self.get()
|
||||
elif (self.resource_group is not None and
|
||||
self.gallery_name is not None and
|
||||
self.gallery_image_name is not None):
|
||||
self.results['versions'] = self.listbygalleryimage()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries' +
|
||||
'/{{ gallery_name }}' +
|
||||
'/images' +
|
||||
'/{{ image_name }}' +
|
||||
'/versions' +
|
||||
'/{{ version_name }}')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
|
||||
self.url = self.url.replace('{{ image_name }}', self.gallery_image_name)
|
||||
self.url = self.url.replace('{{ version_name }}', self.name)
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get info for the GalleryImageVersion instance.')
|
||||
return self.format_item(results)
|
||||
|
||||
def listbygalleryimage(self):
|
||||
response = None
|
||||
results = {}
|
||||
# prepare url
|
||||
self.url = ('/subscriptions' +
|
||||
'/{{ subscription_id }}' +
|
||||
'/resourceGroups' +
|
||||
'/{{ resource_group }}' +
|
||||
'/providers' +
|
||||
'/Microsoft.Compute' +
|
||||
'/galleries' +
|
||||
'/{{ gallery_name }}' +
|
||||
'/images' +
|
||||
'/{{ image_name }}' +
|
||||
'/versions')
|
||||
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
|
||||
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
|
||||
self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
|
||||
self.url = self.url.replace('{{ image_name }}', self.gallery_image_name)
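# Resulting URL, e.g.:
# /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute
#     /galleries/<gallery_name>/images/<gallery_image_name>/versions  (lists every version of the image definition)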
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.query(self.url,
|
||||
'GET',
|
||||
self.query_parameters,
|
||||
self.header_parameters,
|
||||
None,
|
||||
self.status_code,
|
||||
600,
|
||||
30)
|
||||
results = json.loads(response.text)
|
||||
# self.log('Response : {0}'.format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not list the GalleryImageVersion instances.')
|
||||
return [self.format_item(x) for x in results.get('value', [])]
|
||||
def format_item(self, item):
|
||||
d = {
|
||||
'id': item['id'],
|
||||
'name': item['name'],
|
||||
'location': item['location'],
|
||||
'tags': item.get('tags'),
|
||||
'publishing_profile': item['properties']['publishingProfile'],
|
||||
'provisioning_state': item['properties']['provisioningState']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMGalleryImageVersionsInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,555 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_hdinsightcluster
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure HDInsight Cluster instance
|
||||
description:
|
||||
- Create, update and delete instance of Azure HDInsight Cluster.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the cluster.
|
||||
required: True
|
||||
location:
|
||||
description:
|
||||
- Resource location. If not set, location from the resource group will be used as default.
|
||||
cluster_version:
|
||||
description:
|
||||
- The version of the cluster. For example C(3.6).
|
||||
os_type:
|
||||
description:
|
||||
- The type of operating system.
|
||||
choices:
|
||||
- 'linux'
|
||||
tier:
|
||||
description:
|
||||
- The cluster tier.
|
||||
choices:
|
||||
- 'standard'
|
||||
- 'premium'
|
||||
cluster_definition:
|
||||
description:
|
||||
- The cluster definition.
|
||||
suboptions:
|
||||
kind:
|
||||
description:
|
||||
- The type of cluster.
|
||||
choices:
|
||||
- hadoop
|
||||
- spark
|
||||
- hbase
|
||||
- storm
|
||||
gateway_rest_username:
|
||||
description:
|
||||
- Gateway REST user name.
|
||||
gateway_rest_password:
|
||||
description:
|
||||
- Gateway REST password.
|
||||
compute_profile_roles:
|
||||
description:
|
||||
- The list of roles in the cluster.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the role.
|
||||
choices:
|
||||
- 'headnode'
|
||||
- 'workernode'
|
||||
- 'zookepernode'
|
||||
min_instance_count:
|
||||
description:
|
||||
- The minimum instance count of the cluster.
|
||||
target_instance_count:
|
||||
description:
|
||||
- The instance count of the cluster.
|
||||
vm_size:
|
||||
description:
|
||||
- The size of the VM.
|
||||
linux_profile:
|
||||
description:
|
||||
- The Linux OS profile.
|
||||
suboptions:
|
||||
username:
|
||||
description:
|
||||
- SSH user name.
|
||||
password:
|
||||
description:
|
||||
- SSH password.
|
||||
storage_accounts:
|
||||
description:
|
||||
- The list of storage accounts in the cluster.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Blob storage endpoint, for example storage_account_name.blob.core.windows.net.
is_default:
|
||||
description:
|
||||
- Whether or not the storage account is the default storage account.
|
||||
container:
|
||||
description:
|
||||
- The container in the storage account.
|
||||
key:
|
||||
description:
|
||||
- The storage account access key.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the cluster.
|
||||
- Use C(present) to create or update a cluster and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create instance of HDInsight Cluster
|
||||
azure_rm_hdinsightcluster:
|
||||
resource_group: myResourceGroup
|
||||
name: myCluster
|
||||
location: eastus2
|
||||
cluster_version: 3.6
|
||||
os_type: linux
|
||||
tier: standard
|
||||
cluster_definition:
|
||||
kind: spark
|
||||
gateway_rest_username: http-user
|
||||
gateway_rest_password: MuABCPassword!!@123
|
||||
storage_accounts:
|
||||
- name: myStorageAccount.blob.core.windows.net
|
||||
is_default: yes
|
||||
container: myContainer
|
||||
key: GExmaxH4lDNdHA9nwAsCt8t4AOQas2y9vXQP1kKALTram7Q3/5xLVIab3+nYG1x63Xyak9/VXxQyNBHA9pDWw==
|
||||
compute_profile_roles:
|
||||
- name: headnode
|
||||
target_instance_count: 2
|
||||
hardware_profile:
|
||||
vm_size: Standard_D3
|
||||
linux_profile:
|
||||
username: sshuser
|
||||
password: MuABCPassword!!@123
|
||||
- name: workernode
|
||||
target_instance_count: 2
|
||||
vm_size: Standard_D3
|
||||
linux_profile:
|
||||
username: sshuser
|
||||
password: MuABCPassword!!@123
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Fully qualified resource id of the cluster.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.HDInsight/clusters/myCluster
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.hdinsight import HDInsightManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMClusters(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Cluster resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
cluster_version=dict(
|
||||
type='str'
|
||||
),
|
||||
os_type=dict(
|
||||
type='str',
|
||||
choices=['linux']
|
||||
),
|
||||
tier=dict(
|
||||
type='str',
|
||||
choices=['standard',
|
||||
'premium']
|
||||
),
|
||||
cluster_definition=dict(
|
||||
type='dict'
|
||||
),
|
||||
compute_profile_roles=dict(
|
||||
type='list'
|
||||
),
|
||||
storage_accounts=dict(
|
||||
type='list'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.parameters = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
self.tags_changed = False
|
||||
self.new_instance_count = None
|
||||
|
||||
super(AzureRMClusters, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
self.parameters[key] = kwargs[key]
|
||||
|
||||
dict_expand(self.parameters, ['cluster_version'], 'properties')
|
||||
dict_camelize(self.parameters, ['os_type'], True)
|
||||
dict_expand(self.parameters, ['os_type'], 'properties')
|
||||
dict_camelize(self.parameters, ['tier'], True)
|
||||
dict_expand(self.parameters, ['tier'], 'properties')
|
||||
|
||||
dict_rename(self.parameters, ['cluster_definition', 'gateway_rest_username'], 'restAuthCredential.username')
|
||||
dict_rename(self.parameters, ['cluster_definition', 'gateway_rest_password'], 'restAuthCredential.password')
|
||||
dict_expand(self.parameters, ['cluster_definition', 'restAuthCredential.username'], 'gateway')
|
||||
dict_expand(self.parameters, ['cluster_definition', 'restAuthCredential.password'], 'gateway')
|
||||
dict_expand(self.parameters, ['cluster_definition', 'gateway'], 'configurations')
|
||||
|
||||
dict_expand(self.parameters, ['cluster_definition'], 'properties')
|
||||
dict_expand(self.parameters, ['compute_profile_roles', 'vm_size'], 'hardware_profile')
|
||||
dict_rename(self.parameters, ['compute_profile_roles', 'linux_profile'], 'linux_operating_system_profile')
|
||||
dict_expand(self.parameters, ['compute_profile_roles', 'linux_operating_system_profile'], 'os_profile')
|
||||
dict_rename(self.parameters, ['compute_profile_roles'], 'roles')
|
||||
dict_expand(self.parameters, ['roles'], 'compute_profile')
|
||||
dict_expand(self.parameters, ['compute_profile'], 'properties')
|
||||
dict_rename(self.parameters, ['storage_accounts'], 'storageaccounts')
|
||||
dict_expand(self.parameters, ['storageaccounts'], 'storage_profile')
|
||||
dict_expand(self.parameters, ['storage_profile'], 'properties')
|
||||
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(HDInsightManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if "location" not in self.parameters:
|
||||
self.parameters["location"] = resource_group.location
|
||||
|
||||
old_response = self.get_cluster()
|
||||
|
||||
if not old_response:
|
||||
self.log("Cluster instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Cluster instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
compare_result = {}
|
||||
if (not default_compare(self.parameters, old_response, '', compare_result)):
|
||||
if compare_result.pop('/properties/compute_profile/roles/*/target_instance_count', False):
|
||||
# check if it's workernode
|
||||
new_count = 0
|
||||
old_count = 0
|
||||
for role in self.parameters['properties']['compute_profile']['roles']:
|
||||
if role['name'] == 'workernode':
|
||||
new_count = role['target_instance_count']
|
||||
for role in old_response['properties']['compute_profile']['roles']:
|
||||
if role['name'] == 'workernode':
|
||||
old_count = role['target_instance_count']
|
||||
if old_count != new_count:
|
||||
self.new_instance_count = new_count
|
||||
self.to_do = Actions.Update
|
||||
if compare_result.pop('/tags', False):
|
||||
self.to_do = Actions.Update
|
||||
self.tags_changed = True
|
||||
if compare_result:
|
||||
for k in compare_result.keys():
|
||||
self.module.warn("property '" + k + "' cannot be updated (" + compare_result[k] + ")")
|
||||
self.module.warn("only tags and target_instance_count can be updated")
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Cluster instance")
|
||||
self.results['changed'] = True
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
response = self.create_update_cluster()
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Cluster instance deleted")
|
||||
self.results['changed'] = True
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
self.delete_cluster()
|
||||
else:
|
||||
self.log("Cluster instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if self.state == 'present':
|
||||
self.results.update(self.format_item(response))
|
||||
return self.results
|
||||
|
||||
def create_update_cluster(self):
|
||||
'''
|
||||
Creates or updates Cluster with the specified configuration.
|
||||
|
||||
:return: deserialized Cluster instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Cluster instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
if self.to_do == Actions.Create:
|
||||
response = self.mgmt_client.clusters.create(resource_group_name=self.resource_group,
|
||||
cluster_name=self.name,
|
||||
parameters=self.parameters)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
else:
|
||||
if self.tags_changed:
|
||||
response = self.mgmt_client.clusters.update(resource_group_name=self.resource_group,
|
||||
cluster_name=self.name,
|
||||
tags=self.parameters.get('tags'))
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
if self.new_instance_count:
|
||||
response = self.mgmt_client.clusters.resize(resource_group_name=self.resource_group,
|
||||
cluster_name=self.name,
|
||||
target_instance_count=self.new_instance_count)
|
||||
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
except CloudError as exc:
|
||||
self.fail("Error creating or updating Cluster instance: {0}".format(str(exc)))
|
||||
return response.as_dict() if response else {}
|
||||
|
||||
def delete_cluster(self):
|
||||
'''
|
||||
Deletes specified Cluster instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Cluster instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.clusters.delete(resource_group_name=self.resource_group,
|
||||
cluster_name=self.name)
|
||||
except CloudError as e:
|
||||
self.fail("Error deleting the Cluster instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_cluster(self):
|
||||
'''
|
||||
Gets the properties of the specified Cluster.
|
||||
|
||||
:return: deserialized Cluster instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Cluster instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.clusters.get(resource_group_name=self.resource_group,
|
||||
cluster_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Cluster instance : {0} found".format(response.name))
|
||||
except Exception as e:
|
||||
self.log('Did not find the Cluster instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
def format_item(self, d):
|
||||
d = {
|
||||
'id': d.get('id', None)
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def default_compare(new, old, path, result):
|
||||
if new is None:
|
||||
match = True
|
||||
elif isinstance(new, dict):
|
||||
match = True
|
||||
if not isinstance(old, dict):
|
||||
result[path] = 'old dict is null'
|
||||
match = False
|
||||
else:
|
||||
for k in new.keys():
|
||||
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
|
||||
match = False
|
||||
elif isinstance(new, list):
|
||||
if not isinstance(old, list) or len(new) != len(old):
|
||||
result[path] = 'length is different or null'
|
||||
match = False
|
||||
elif len(old) == 0:
|
||||
match = True
|
||||
else:
|
||||
match = True
|
||||
if isinstance(old[0], dict):
|
||||
key = None
|
||||
if 'id' in old[0] and 'id' in new[0]:
|
||||
key = 'id'
|
||||
elif 'name' in old[0] and 'name' in new[0]:
|
||||
key = 'name'
|
||||
else:
|
||||
key = list(old[0])[0]
|
||||
new = sorted(new, key=lambda x: x.get(key, ''))
|
||||
old = sorted(old, key=lambda x: x.get(key, ''))
|
||||
else:
|
||||
new = sorted(new)
|
||||
old = sorted(old)
|
||||
for i in range(len(new)):
|
||||
if not default_compare(new[i], old[i], path + '/*', result):
|
||||
match = False
|
||||
return match
|
||||
else:
|
||||
if path.endswith('password'):
|
||||
match = True
|
||||
else:
|
||||
if path == '/location' or path.endswith('location_name'):
|
||||
new = new.replace(' ', '').lower()
old = old.replace(' ', '').lower()
|
||||
if new == old:
|
||||
match = True
|
||||
else:
|
||||
result[path] = str(new) + ' != ' + str(old)
|
||||
match = False
|
||||
return match
|
||||
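# Illustrative sketch, not part of the original module: default_compare() records every
# mismatching leaf under its path in the result dict, which exec_module() then inspects
# (for example '/tags' or '/properties/compute_profile/roles/*/target_instance_count').
def _example_default_compare():
    desired = {'properties': {'tier': 'Standard'}, 'tags': {'env': 'dev'}}
    current = {'properties': {'tier': 'Premium'}, 'tags': {'env': 'dev'}}
    diff = {}
    default_compare(desired, current, '', diff)
    return diff  # -> {'/properties/tier': 'Standard != Premium'}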
|
||||
|
||||
def dict_camelize(d, path, camelize_first):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_camelize(d[i], path, camelize_first)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.get(path[0], None)
|
||||
if old_value is not None:
|
||||
d[path[0]] = _snake_to_camel(old_value, camelize_first)
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_camelize(sd, path[1:], camelize_first)
|
||||
|
||||
|
||||
def dict_upper(d, path):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_upper(d[i], path)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.get(path[0], None)
|
||||
if old_value is not None:
|
||||
d[path[0]] = old_value.upper()
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_upper(sd, path[1:])
|
||||
|
||||
|
||||
def dict_rename(d, path, new_name):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_rename(d[i], path, new_name)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.pop(path[0], None)
|
||||
if old_value is not None:
|
||||
d[new_name] = old_value
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_rename(sd, path[1:], new_name)
|
||||
|
||||
|
||||
def dict_expand(d, path, outer_dict_name):
|
||||
if isinstance(d, list):
|
||||
for i in range(len(d)):
|
||||
dict_expand(d[i], path, outer_dict_name)
|
||||
elif isinstance(d, dict):
|
||||
if len(path) == 1:
|
||||
old_value = d.pop(path[0], None)
|
||||
if old_value is not None:
|
||||
d[outer_dict_name] = d.get(outer_dict_name, {})
|
||||
d[outer_dict_name][path[0]] = old_value
|
||||
else:
|
||||
sd = d.get(path[0], None)
|
||||
if sd is not None:
|
||||
dict_expand(sd, path[1:], outer_dict_name)
|
||||
|
||||
|
||||
def _snake_to_camel(snake, capitalize_first=False):
|
||||
if capitalize_first:
|
||||
return ''.join(x.capitalize() or '_' for x in snake.split('_'))
|
||||
else:
|
||||
return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:])
|
||||
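# Illustrative sketch, not part of the original module: how the dict_* helpers above
# reshape the flat module parameters into the nested payload the HDInsight API expects,
# and how _snake_to_camel behaves (_snake_to_camel('os_type', True) -> 'OsType',
# _snake_to_camel('cluster_version') -> 'clusterVersion').
def _example_dict_transformations():
    params = {'cluster_definition': {'kind': 'spark', 'gateway_rest_username': 'http-user'}}
    dict_rename(params, ['cluster_definition', 'gateway_rest_username'], 'restAuthCredential.username')
    dict_expand(params, ['cluster_definition', 'restAuthCredential.username'], 'gateway')
    dict_expand(params, ['cluster_definition', 'gateway'], 'configurations')
    # params is now:
    # {'cluster_definition': {'kind': 'spark',
    #                         'configurations': {'gateway': {'restAuthCredential.username': 'http-user'}}}}
    return params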
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMClusters()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,321 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_hdinsightcluster_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure HDInsight Cluster facts
|
||||
description:
|
||||
- Get facts of Azure HDInsight Cluster.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of an Azure resource group.
|
||||
name:
|
||||
description:
|
||||
- HDInsight cluster name.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of HDInsight Cluster
|
||||
azure_rm_hdinsightcluster_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myCluster
|
||||
|
||||
- name: List instances of HDInsight Cluster
|
||||
azure_rm_hdinsightcluster_info:
|
||||
resource_group: myResourceGroup
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
clusters:
|
||||
description:
|
||||
- A list of dictionaries containing facts for HDInsight Cluster.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- The unique resource identifier of the HDInsight Cluster.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.HDInsight/clusters/myCluster"
|
||||
resource_group:
|
||||
description:
|
||||
- Name of an Azure resource group.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
name:
|
||||
description:
|
||||
- The name of the HDInsight Cluster.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testaccount
|
||||
location:
|
||||
description:
|
||||
- The location of the resource group to which the resource belongs.
|
||||
returned: always
|
||||
type: str
|
||||
sample: westus
|
||||
cluster_version:
|
||||
description:
|
||||
- The version of the cluster.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 3.6.1000.67
|
||||
os_type:
|
||||
description:
|
||||
- The type of operating system.
|
||||
returned: always
|
||||
type: str
|
||||
sample: linux
|
||||
tier:
|
||||
description:
|
||||
- The cluster tier.
|
||||
returned: always
|
||||
type: str
|
||||
sample: standard
|
||||
cluster_definition:
|
||||
description:
|
||||
- The cluster definition.
|
||||
contains:
|
||||
kind:
|
||||
description:
|
||||
- The type of cluster.
|
||||
returned: always
|
||||
type: str
|
||||
sample: spark
|
||||
compute_profile_roles:
|
||||
description:
|
||||
- The list of roles in the cluster.
|
||||
type: list
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- The name of the role.
|
||||
returned: always
|
||||
type: str
|
||||
sample: headnode
|
||||
target_instance_count:
|
||||
description:
|
||||
- The instance count of the cluster.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 2
|
||||
vm_size:
|
||||
description:
|
||||
- The size of the VM.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Standard_D3
|
||||
linux_profile:
|
||||
description:
|
||||
- The Linux OS profile.
|
||||
contains:
|
||||
username:
|
||||
description:
|
||||
- User name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myuser
|
||||
connectivity_endpoints:
|
||||
description:
|
||||
- Cluster's connectivity endpoints.
|
||||
type: list
|
||||
contains:
|
||||
location:
|
||||
description:
|
||||
- Endpoint location.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myCluster-ssh.azurehdinsight.net
|
||||
name:
|
||||
description:
|
||||
- Endpoint name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: SSH
|
||||
port:
|
||||
description:
|
||||
- Endpoint port.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 22
|
||||
protocol:
|
||||
description:
|
||||
- Endpoint protocol.
|
||||
returned: always
|
||||
type: str
|
||||
sample: TCP
|
||||
tags:
|
||||
description:
|
||||
- The tags of the resource.
|
||||
returned: always
|
||||
type: complex
|
||||
sample: {}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _camel_to_snake
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.hdinsight import HDInsightManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMHDInsightclusterInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str'
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMHDInsightclusterInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_hdinsightcluster_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_hdinsightcluster_facts' module has been renamed to 'azure_rm_hdinsightcluster_info'",
|
||||
version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(HDInsightManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name is not None:
|
||||
self.results['clusters'] = self.get()
|
||||
elif self.resource_group is not None:
|
||||
self.results['clusters'] = self.list_by_resource_group()
|
||||
else:
|
||||
self.results['clusters'] = self.list_all()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.clusters.get(resource_group_name=self.resource_group,
|
||||
cluster_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for HDInsight Cluster.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_response(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_resource_group(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.clusters.list_by_resource_group(resource_group_name=self.resource_group)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for HDInsight Cluster.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def list_all(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.clusters.list()
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for HDInsight Cluster.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_response(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_response(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'id': d.get('id'),
|
||||
'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
|
||||
'name': d.get('name', None),
|
||||
'location': d.get('location', '').replace(' ', '').lower(),
|
||||
|
||||
'cluster_version': d.get('properties', {}).get('cluster_version'),
|
||||
'os_type': d.get('properties', {}).get('os_type'),
|
||||
'tier': d.get('properties', {}).get('tier'),
|
||||
'cluster_definition': {
|
||||
'kind': d.get('properties', {}).get('cluster_definition', {}).get('kind')
|
||||
},
|
||||
'compute_profile_roles': [{
|
||||
'name': item.get('name'),
|
||||
'target_instance_count': item.get('target_instance_count'),
|
||||
'vm_size': item.get('hardware_profile', {}).get('vm_size'),
|
||||
'linux_profile': {
|
||||
'username': item.get('os_profile', {}).get('linux_operating_system_profile', {}).get('username')
|
||||
}
|
||||
} for item in d.get('properties', {}).get('compute_profile', {}).get('roles', [])],
'connectivity_endpoints': d.get('properties', {}).get('connectivity_endpoints'),
|
||||
'tags': d.get('tags', None)
|
||||
}
|
||||
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMHDInsightclusterInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,370 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_image
|
||||
version_added: "2.5"
|
||||
short_description: Manage Azure image
|
||||
description:
|
||||
- Create, delete an image from virtual machine, blob uri, managed disk or snapshot.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the image.
|
||||
required: true
|
||||
source:
|
||||
description:
|
||||
- OS disk source from the same region.
|
||||
- It can be a virtual machine, OS disk blob URI, managed OS disk, or OS snapshot.
|
||||
- Each type of source except for blob URI can be given as a resource id, a name, or a dict containing C(resource_group), C(name) and C(type).
- If source type is blob URI, the source should be the full URI of the blob in string type.
|
||||
- If you specify the I(type) in a dict, acceptable value contains C(disks), C(virtual_machines) and C(snapshots).
|
||||
type: raw
|
||||
required: true
|
||||
data_disk_sources:
|
||||
description:
|
||||
- List of data disk sources, including unmanaged blob URI, managed disk id or name, or snapshot id or name.
|
||||
type: list
|
||||
location:
|
||||
description:
|
||||
- Location of the image. Derived from I(resource_group) if not specified.
|
||||
os_type:
|
||||
description: The OS type of image.
|
||||
choices:
|
||||
- Windows
|
||||
- Linux
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the image. Use C(present) to create or update an image and C(absent) to delete an image.
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create an image from a virtual machine
|
||||
azure_rm_image:
|
||||
resource_group: myResourceGroup
|
||||
name: myImage
|
||||
source: myVirtualMachine
|
||||
|
||||
- name: Create an image from os disk
|
||||
azure_rm_image:
|
||||
resource_group: myResourceGroup
|
||||
name: myImage
|
||||
source: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/disks/disk001
|
||||
data_disk_sources:
|
||||
- datadisk001
|
||||
- datadisk002
|
||||
os_type: Linux
|
||||
|
||||
- name: Create an image from os disk via dict
|
||||
azure_rm_image:
|
||||
resource_group: myResourceGroup
|
||||
name: myImage
|
||||
source:
|
||||
type: disks
|
||||
resource_group: myResourceGroup
|
||||
name: disk001
|
||||
data_disk_sources:
|
||||
- datadisk001
|
||||
- datadisk002
|
||||
os_type: Linux
|
||||
|
||||
- name: Delete an image
|
||||
azure_rm_image:
|
||||
state: absent
|
||||
resource_group: myResourceGroup
|
||||
name: myImage
|
||||
source: testvm001
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Image resource path.
|
||||
type: str
|
||||
returned: success
|
||||
example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/images/myImage"
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMImage(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
location=dict(type='str'),
|
||||
source=dict(type='raw'),
|
||||
data_disk_sources=dict(type='list', default=[]),
|
||||
os_type=dict(type='str', choices=['Windows', 'Linux'])
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['source'])
|
||||
]
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.location = None
|
||||
self.source = None
|
||||
self.data_disk_sources = None
|
||||
self.os_type = None
|
||||
|
||||
super(AzureRMImage, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
results = None
|
||||
changed = False
|
||||
image = None
|
||||
|
||||
if not self.location:
|
||||
# Set default location
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
self.location = resource_group.location
|
||||
|
||||
self.log('Fetching image {0}'.format(self.name))
|
||||
image = self.get_image()
|
||||
if image:
|
||||
self.check_provisioning_state(image, self.state)
|
||||
results = image.id
|
||||
# update is not supported except for tags
|
||||
update_tags, tags = self.update_tags(image.tags)
|
||||
if update_tags:
|
||||
changed = True
|
||||
self.tags = tags
|
||||
if self.state == 'absent':
|
||||
changed = True
|
||||
# the image does not exist, so create a new one
elif self.state == 'present':
|
||||
changed = True
|
||||
|
||||
self.results['changed'] = changed
|
||||
self.results['id'] = results
|
||||
|
||||
if changed:
|
||||
if self.state == 'present':
|
||||
image_instance = None
|
||||
# create from virtual machine
|
||||
vm = self.get_source_vm()
|
||||
if vm:
|
||||
if self.data_disk_sources:
|
||||
self.fail('data_disk_sources is not allowed when capturing image from vm')
|
||||
image_instance = self.compute_models.Image(location=self.location,
|
||||
source_virtual_machine=self.compute_models.SubResource(id=vm.id),
|
||||
tags=self.tags)
|
||||
else:
|
||||
if not self.os_type:
|
||||
self.fail('os_type is required to create the image')
|
||||
os_disk = self.create_os_disk()
|
||||
data_disks = self.create_data_disks()
|
||||
storage_profile = self.compute_models.ImageStorageProfile(os_disk=os_disk, data_disks=data_disks)
|
||||
image_instance = self.compute_models.Image(location=self.location, storage_profile=storage_profile, tags=self.tags)
|
||||
|
||||
# finally make the change if not check mode
|
||||
if not self.check_mode and image_instance:
|
||||
new_image = self.create_image(image_instance)
|
||||
self.results['id'] = new_image.id
|
||||
|
||||
elif self.state == 'absent':
|
||||
if not self.check_mode:
|
||||
# delete image
|
||||
self.delete_image()
|
||||
# the delete call does not return anything; if no exception was raised, assume it worked
self.results['id'] = None
|
||||
|
||||
return self.results
|
||||
|
||||
def resolve_storage_source(self, source):
|
||||
blob_uri = None
|
||||
disk = None
|
||||
snapshot = None
|
||||
# blob URI can only be given by str
|
||||
if isinstance(source, str) and source.lower().endswith('.vhd'):
|
||||
blob_uri = source
|
||||
return (blob_uri, disk, snapshot)
|
||||
|
||||
tokenize = dict()
|
||||
if isinstance(source, dict):
|
||||
tokenize = source
|
||||
elif isinstance(source, str):
|
||||
tokenize = parse_resource_id(source)
|
||||
else:
|
||||
self.fail("source parameter should be in type string or dictionary")
|
||||
if tokenize.get('type') == 'disks':
|
||||
disk = format_resource_id(tokenize['name'],
|
||||
tokenize.get('subscription_id') or self.subscription_id,
|
||||
'Microsoft.Compute',
|
||||
'disks',
|
||||
tokenize.get('resource_group') or self.resource_group)
|
||||
return (blob_uri, disk, snapshot)
|
||||
|
||||
if tokenize.get('type') == 'snapshots':
|
||||
snapshot = format_resource_id(tokenize['name'],
|
||||
tokenize.get('subscription_id') or self.subscription_id,
|
||||
'Microsoft.Compute',
|
||||
'snapshots',
|
||||
tokenize.get('resource_group') or self.resource_group)
|
||||
return (blob_uri, disk, snapshot)
|
||||
|
||||
# not a disk or snapshots
|
||||
if 'type' in tokenize:
|
||||
return (blob_uri, disk, snapshot)
|
||||
|
||||
# source can be name of snapshot or disk
|
||||
snapshot_instance = self.get_snapshot(tokenize.get('resource_group') or self.resource_group,
|
||||
tokenize['name'])
|
||||
if snapshot_instance:
|
||||
snapshot = snapshot_instance.id
|
||||
return (blob_uri, disk, snapshot)
|
||||
|
||||
disk_instance = self.get_disk(tokenize.get('resource_group') or self.resource_group,
|
||||
tokenize['name'])
|
||||
if disk_instance:
|
||||
disk = disk_instance.id
|
||||
return (blob_uri, disk, snapshot)
|
||||
|
||||
def create_os_disk(self):
|
||||
blob_uri, disk, snapshot = self.resolve_storage_source(self.source)
|
||||
snapshot_resource = self.compute_models.SubResource(id=snapshot) if snapshot else None
|
||||
managed_disk = self.compute_models.SubResource(id=disk) if disk else None
|
||||
return self.compute_models.ImageOSDisk(os_type=self.os_type,
|
||||
os_state=self.compute_models.OperatingSystemStateTypes.generalized,
|
||||
snapshot=snapshot_resource,
|
||||
managed_disk=managed_disk,
|
||||
blob_uri=blob_uri)
|
||||
|
||||
def create_data_disk(self, lun, source):
|
||||
blob_uri, disk, snapshot = self.resolve_storage_source(source)
|
||||
if blob_uri or disk or snapshot:
|
||||
snapshot_resource = self.compute_models.SubResource(id=snapshot) if snapshot else None
|
||||
managed_disk = self.compute_models.SubResource(id=disk) if disk else None
|
||||
return self.compute_models.ImageDataDisk(lun=lun,
|
||||
blob_uri=blob_uri,
|
||||
snapshot=snapshot_resource,
|
||||
managed_disk=managed_disk)
|
||||
|
||||
def create_data_disks(self):
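# Build one ImageDataDisk per data_disk_sources entry (LUNs assigned by list position)
# and drop entries that could not be resolved to a blob URI, managed disk or snapshot.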
|
||||
return list(filter(None, [self.create_data_disk(lun, source) for lun, source in enumerate(self.data_disk_sources)]))
|
||||
|
||||
def get_source_vm(self):
|
||||
# self.source can be a vm (id/name/dict), or not a vm. Return the vm only if it is an existing vm.
resource = dict()
|
||||
if isinstance(self.source, dict):
|
||||
if self.source.get('type') != 'virtual_machines':
|
||||
return None
|
||||
resource = dict(type='virtualMachines',
|
||||
name=self.source['name'],
|
||||
resource_group=self.source.get('resource_group') or self.resource_group)
|
||||
elif isinstance(self.source, str):
|
||||
vm_resource_id = format_resource_id(self.source,
|
||||
self.subscription_id,
|
||||
'Microsoft.Compute',
|
||||
'virtualMachines',
|
||||
self.resource_group)
|
||||
resource = parse_resource_id(vm_resource_id)
|
||||
else:
|
||||
self.fail("Unsupported type of source parameter, please give string or dictionary")
|
||||
return self.get_vm(resource['resource_group'], resource['name']) if resource['type'] == 'virtualMachines' else None
|
||||
|
||||
def get_snapshot(self, resource_group, snapshot_name):
|
||||
return self._get_resource(self.compute_client.snapshots.get, resource_group, snapshot_name)
|
||||
|
||||
def get_disk(self, resource_group, disk_name):
|
||||
return self._get_resource(self.compute_client.disks.get, resource_group, disk_name)
|
||||
|
||||
def get_vm(self, resource_group, vm_name):
|
||||
return self._get_resource(self.compute_client.virtual_machines.get, resource_group, vm_name, 'instanceview')
|
||||
|
||||
def get_image(self):
|
||||
return self._get_resource(self.compute_client.images.get, self.resource_group, self.name)
|
||||
|
||||
def _get_resource(self, get_method, resource_group, name, expand=None):
|
||||
try:
|
||||
if expand:
|
||||
return get_method(resource_group, name, expand=expand)
|
||||
else:
|
||||
return get_method(resource_group, name)
|
||||
except CloudError as cloud_err:
|
||||
# Return None iff the resource is not found
|
||||
if cloud_err.status_code == 404:
|
||||
self.log('{0}'.format(str(cloud_err)))
|
||||
return None
|
||||
self.fail('Error: failed to get resource {0} - {1}'.format(name, str(cloud_err)))
|
||||
except Exception as exc:
|
||||
self.fail('Error: failed to get resource {0} - {1}'.format(name, str(exc)))
|
||||
|
||||
def create_image(self, image):
|
||||
try:
|
||||
poller = self.compute_client.images.create_or_update(self.resource_group, self.name, image)
|
||||
new_image = self.get_poller_result(poller)
|
||||
except Exception as exc:
|
||||
self.fail("Error creating image {0} - {1}".format(self.name, str(exc)))
|
||||
self.check_provisioning_state(new_image)
|
||||
return new_image
|
||||
|
||||
def delete_image(self):
|
||||
self.log('Deleting image {0}'.format(self.name))
|
||||
try:
|
||||
poller = self.compute_client.images.delete(self.resource_group, self.name)
|
||||
result = self.get_poller_result(poller)
|
||||
except Exception as exc:
|
||||
self.fail("Error deleting image {0} - {1}".format(self.name, str(exc)))
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMImage()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,307 +0,0 @@
#!/usr/bin/python
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_image_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get facts about Azure custom images
|
||||
|
||||
description:
|
||||
- List Azure custom images. The listing scope can be the whole subscription, a resource group, an image name, or tags.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
name:
|
||||
description:
|
||||
- Name of the image to filter from existing images.
|
||||
tags:
|
||||
description:
|
||||
- List of tags to be matched.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Madhura Naniwadekar (@Madhura-CSI)
|
||||
'''
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: List images with name
|
||||
azure_rm_image_info:
|
||||
name: test-image
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: List images by resource group
|
||||
azure_rm_image_info:
|
||||
resource_group: myResourceGroup
|
||||
tags:
|
||||
- testing
|
||||
- foo:bar
|
||||
|
||||
- name: List all available images under current subscription
|
||||
azure_rm_image_info:
|
||||
'''
|
||||
|
||||
|
||||
RETURN = '''
|
||||
images:
|
||||
description:
|
||||
- List of image dicts.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Id of the image.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/xx
|
||||
name:
|
||||
description:
|
||||
- Name of the image.
|
||||
returned: always
|
||||
type: str
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the image.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
location:
|
||||
description:
|
||||
- Location of the image.
|
||||
returned: always
|
||||
type: str
|
||||
os_disk:
|
||||
description:
|
||||
- Id of os disk for image.
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/xx
|
||||
os_disk_caching:
|
||||
description:
|
||||
- Specifies caching requirements for the image.
|
||||
returned: always
|
||||
type: str
|
||||
os_state:
|
||||
description:
|
||||
- Specifies image operating system state. Possible values are C(Generalized) or C(Specialized).
|
||||
returned: always
|
||||
type: str
|
||||
sample: Generalized
|
||||
os_storage_account_type:
|
||||
description:
|
||||
- Specifies the storage account type for the managed disk.
|
||||
type: str
|
||||
sample: Standard_LRS
|
||||
os_type:
|
||||
description:
|
||||
- Type of OS for image.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Linux
|
||||
provisioning_state:
|
||||
description:
|
||||
- State of image.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Succeeded
|
||||
source:
|
||||
description:
|
||||
- Resource id of source VM from which the image is created.
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/xx
|
||||
tags:
|
||||
description:
|
||||
- Dictionary of tags associated with the image.
|
||||
type: complex
|
||||
data_disks:
|
||||
description:
|
||||
- List of data disks associated with the image.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
caching:
|
||||
description:
|
||||
- Type of caching of data disk.
|
||||
sample: read_only
|
||||
disk_size_gb:
|
||||
description:
|
||||
- Specifies the size of empty data disks in gigabytes.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 50
|
||||
lun:
|
||||
description:
|
||||
- Specifies the logical unit number of the data disk.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 0
|
||||
storage_account_type:
|
||||
description:
|
||||
- Specifies the storage account type for the managed disk data disk.
|
||||
type: str
|
||||
sample: Standard_LRS
|
||||
managed_disk_id:
|
||||
description:
|
||||
- Id of managed disk.
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/xx
|
||||
blob_uri:
|
||||
description:
|
||||
- URI of the VHD blob backing the data disk.
|
||||
'''
|
||||
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except Exception:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
|
||||
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
|
||||
|
||||
|
||||
class AzureRMImageInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str'),
|
||||
name=dict(type='str'),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.format = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMImageInfo, self).__init__(
|
||||
derived_arg_spec=self.module_arg_spec,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_image_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_image_facts' module has been renamed to 'azure_rm_image_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name and self.resource_group:
|
||||
self.results['images'] = self.get_image(self.resource_group, self.name)
|
||||
elif self.name and not self.resource_group:
|
||||
self.results['images'] = self.list_images(self.name)
|
||||
elif not self.name and self.resource_group:
|
||||
self.results['images'] = self.list_images_by_resource_group(self.resource_group)
|
||||
elif not self.name and not self.resource_group:
|
||||
self.results['images'] = self.list_images()
|
||||
return self.results
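    # Illustrative summary, not part of the original module: with a name but no
    # resource_group the dispatch above falls back to list_images(), which enumerates
    # every image in the subscription and filters by name client-side, so passing
    # resource_group as well lets get_image() do a direct lookup instead.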
def get_image(self, resource_group, image_name):
|
||||
'''
|
||||
Returns image details based on its name
|
||||
'''
|
||||
|
||||
self.log('Get properties for {0}'.format(self.name))
|
||||
|
||||
result = []
|
||||
item = None
|
||||
try:
|
||||
item = self.compute_client.images.get(resource_group, image_name)
|
||||
except CloudError as exc:
|
||||
self.fail('Failed to get image {0} - {1}'.format(image_name, str(exc)))
|
||||
|
||||
result = [self.format_item(item)]
|
||||
return result
|
||||
|
||||
def list_images_by_resource_group(self, resource_group):
|
||||
'''
|
||||
Returns image details based on its resource group
|
||||
'''
|
||||
|
||||
self.log('List images filtered by resource group')
|
||||
response = None
|
||||
try:
|
||||
response = self.compute_client.images.list_by_resource_group(resource_group)
|
||||
except CloudError as exc:
|
||||
self.fail("Failed to list images: {0}".format(str(exc)))
|
||||
|
||||
return [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else []
|
||||
|
||||
def list_images(self, image_name=None):
|
||||
'''
|
||||
Returns image details in current subscription
|
||||
'''
|
||||
|
||||
self.log('List images within current subscription')
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.compute_client.images.list()
|
||||
except CloudError as exc:
|
||||
self.fail("Failed to list all images: {0}".format(str(exc)))
|
||||
|
||||
results = [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else []
|
||||
if image_name:
|
||||
results = [result for result in results if result['name'] == image_name]
|
||||
return results
|
||||
|
||||
def format_item(self, item):
|
||||
d = item.as_dict()
|
||||
|
||||
for data_disk in d['storage_profile']['data_disks']:
|
||||
if 'managed_disk' in data_disk.keys():
|
||||
data_disk['managed_disk_id'] = data_disk['managed_disk']['id']
|
||||
data_disk.pop('managed_disk', None)
|
||||
|
||||
d = {
|
||||
'id': d['id'],
|
||||
'resource_group': d['id'].split('/')[4],
|
||||
'name': d['name'],
|
||||
'location': d['location'],
|
||||
'tags': d.get('tags'),
|
||||
'source': d['source_virtual_machine']['id'] if 'source_virtual_machine' in d.keys() else None,
|
||||
'os_type': d['storage_profile']['os_disk']['os_type'],
|
||||
'os_state': d['storage_profile']['os_disk']['os_state'],
|
||||
'os_disk_caching': d['storage_profile']['os_disk']['caching'],
|
||||
'os_storage_account_type': d['storage_profile']['os_disk']['storage_account_type'],
|
||||
'os_disk': d['storage_profile']['os_disk']['managed_disk']['id'] if 'managed_disk' in d['storage_profile']['os_disk'].keys() else None,
|
||||
'os_blob_uri': d['storage_profile']['os_disk']['blob_uri'] if 'blob_uri' in d['storage_profile']['os_disk'].keys() else None,
|
||||
'provisioning_state': d['provisioning_state'],
|
||||
'data_disks': d['storage_profile']['data_disks']
|
||||
}
|
||||
return d
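    # Illustrative note, not part of the original module: format_item() flattens the
    # SDK Image model into the documented RETURN structure; the resource group is
    # recovered from position 4 of the resource id path, e.g.
    #   '/subscriptions/<sub>/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/img'.split('/')[4]
    #   == 'myResourceGroup'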
def main():
|
||||
AzureRMImageInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,472 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_iotdevice
|
||||
version_added: "2.9"
|
||||
short_description: Manage Azure IoT hub device
|
||||
description:
|
||||
- Create or delete an Azure IoT hub device.
|
||||
options:
|
||||
hub:
|
||||
description:
|
||||
- Name of IoT Hub.
|
||||
type: str
|
||||
required: true
|
||||
hub_policy_name:
|
||||
description:
|
||||
- Name of the IoT hub shared access policy that will be used to query the IoT hub.
- This policy should have the 'RegistryWrite', 'ServiceConnect' and 'DeviceConnect' permissions. You may get a 401 error if any of them is missing.
|
||||
type: str
|
||||
required: true
|
||||
hub_policy_key:
|
||||
description:
|
||||
- Key of the I(hub_policy_name).
|
||||
type: str
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the IoT hub device identity.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- State of the IoT hub device. Use C(present) to create or update an IoT hub device and C(absent) to delete an IoT hub device.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
auth_method:
|
||||
description:
|
||||
- The authorization type an entity is to be created with.
|
||||
type: str
|
||||
choices:
|
||||
- sas
|
||||
- certificate_authority
|
||||
- self_signed
|
||||
default: sas
|
||||
primary_key:
|
||||
description:
|
||||
- Explicit self-signed certificate thumbprint to use for primary key.
|
||||
- Explicit Shared Private Key to use for primary key.
|
||||
type: str
|
||||
aliases:
|
||||
- primary_thumbprint
|
||||
secondary_key:
|
||||
description:
|
||||
- Explicit self-signed certificate thumbprint to use for secondary key.
|
||||
- Explicit Shared Private Key to use for secondary key.
|
||||
type: str
|
||||
aliases:
|
||||
- secondary_thumbprint
|
||||
status:
|
||||
description:
|
||||
- Enable or disable the device.
|
||||
type: bool
|
||||
edge_enabled:
|
||||
description:
|
||||
- Flag indicating edge enablement.
|
||||
- Not supported in IoT Hub with Basic tier.
|
||||
type: bool
|
||||
twin_tags:
|
||||
description:
|
||||
- A section that the solution back end can read from and write to.
|
||||
- Tags are not visible to device apps.
|
||||
- "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key."
|
||||
- List is not supported.
|
||||
- Not supported in IoT Hub with Basic tier.
|
||||
type: dict
|
||||
desired:
|
||||
description:
|
||||
- Used along with reported properties to synchronize device configuration or conditions.
|
||||
- "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key."
|
||||
- List is not supported.
|
||||
- Not supported in IoT Hub with Basic tier.
|
||||
type: dict
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create simplest Azure IoT Hub device
|
||||
azure_rm_iotdevice:
|
||||
hub: myHub
|
||||
name: Testing
|
||||
hub_policy_name: iothubowner
|
||||
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
|
||||
- name: Create Azure IoT Edge device
|
||||
azure_rm_iotdevice:
|
||||
hub: myHub
|
||||
name: Testing
|
||||
hub_policy_name: iothubowner
|
||||
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
edge_enabled: yes
|
||||
|
||||
- name: Create Azure IoT Hub device with device twin properties and tag
|
||||
azure_rm_iotdevice:
|
||||
hub: myHub
|
||||
name: Testing
|
||||
hub_policy_name: iothubowner
|
||||
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
twin_tags:
|
||||
location:
|
||||
country: US
|
||||
city: Redmond
|
||||
sensor: humidity
|
||||
desired:
|
||||
period: 100
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
device:
|
||||
description:
|
||||
- IoT Hub device.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"authentication": {
|
||||
"symmetricKey": {
|
||||
"primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
},
|
||||
"type": "sas",
|
||||
"x509Thumbprint": {
|
||||
"primaryThumbprint": null,
|
||||
"secondaryThumbprint": null
|
||||
}
|
||||
},
|
||||
"capabilities": {
|
||||
"iotEdge": false
|
||||
},
|
||||
"changed": true,
|
||||
"cloudToDeviceMessageCount": 0,
|
||||
"connectionState": "Disconnected",
|
||||
"connectionStateUpdatedTime": "0001-01-01T00:00:00",
|
||||
"deviceId": "Testing",
|
||||
"etag": "NzA2NjU2ODc=",
|
||||
"failed": false,
|
||||
"generationId": "636903014505613307",
|
||||
"lastActivityTime": "0001-01-01T00:00:00",
|
||||
"modules": [
|
||||
{
|
||||
"authentication": {
|
||||
"symmetricKey": {
|
||||
"primaryKey": "XXXXXXXXXXXXXXXXXXX",
|
||||
"secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
},
|
||||
"type": "sas",
|
||||
"x509Thumbprint": {
|
||||
"primaryThumbprint": null,
|
||||
"secondaryThumbprint": null
|
||||
}
|
||||
},
|
||||
"cloudToDeviceMessageCount": 0,
|
||||
"connectionState": "Disconnected",
|
||||
"connectionStateUpdatedTime": "0001-01-01T00:00:00",
|
||||
"deviceId": "testdevice",
|
||||
"etag": "MjgxOTE5ODE4",
|
||||
"generationId": "636903840872788074",
|
||||
"lastActivityTime": "0001-01-01T00:00:00",
|
||||
"managedBy": null,
|
||||
"moduleId": "test"
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"desired": {
|
||||
"$metadata": {
|
||||
"$lastUpdated": "2019-04-10T05:00:46.2702079Z",
|
||||
"$lastUpdatedVersion": 8,
|
||||
"period": {
|
||||
"$lastUpdated": "2019-04-10T05:00:46.2702079Z",
|
||||
"$lastUpdatedVersion": 8
|
||||
}
|
||||
},
|
||||
"$version": 1,
|
||||
"period": 100
|
||||
},
|
||||
"reported": {
|
||||
"$metadata": {
|
||||
"$lastUpdated": "2019-04-08T06:24:10.5613307Z"
|
||||
},
|
||||
"$version": 1
|
||||
}
|
||||
},
|
||||
"status": "enabled",
|
||||
"statusReason": null,
|
||||
"statusUpdatedTime": "0001-01-01T00:00:00",
|
||||
"tags": {
|
||||
"location": {
|
||||
"country": "us",
|
||||
"city": "Redmond"
|
||||
},
|
||||
"sensor": "humidity"
|
||||
}
|
||||
}
|
||||
''' # NOQA
|
||||
|
||||
import json
|
||||
import copy
|
||||
import re
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMIoTDevice(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
hub_policy_name=dict(type='str', required=True),
|
||||
hub_policy_key=dict(type='str', required=True),
|
||||
hub=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
status=dict(type='bool'),
|
||||
edge_enabled=dict(type='bool'),
|
||||
twin_tags=dict(type='dict'),
|
||||
desired=dict(type='dict'),
|
||||
auth_method=dict(type='str', choices=['self_signed', 'sas', 'certificate_authority'], default='sas'),
|
||||
primary_key=dict(type='str', no_log=True, aliases=['primary_thumbprint']),
|
||||
secondary_key=dict(type='str', no_log=True, aliases=['secondary_thumbprint'])
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.hub = None
|
||||
self.hub_policy_key = None
|
||||
self.hub_policy_name = None
|
||||
self.state = None
|
||||
self.status = None
|
||||
self.edge_enabled = None
|
||||
self.twin_tags = None
|
||||
self.desired = None
|
||||
self.auth_method = None
|
||||
self.primary_key = None
|
||||
self.secondary_key = None
|
||||
|
||||
required_if = [
|
||||
['auth_method', 'self_signed', ['certificate_authority']]
|
||||
]
|
||||
|
||||
self._base_url = None
|
||||
self._mgmt_client = None
|
||||
self.query_parameters = {
|
||||
'api-version': '2018-06-30'
|
||||
}
|
||||
self.header_parameters = {
|
||||
'Content-Type': 'application/json; charset=utf-8',
|
||||
'accept-language': 'en-US'
|
||||
}
|
||||
super(AzureRMIoTDevice, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec.keys():
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self._base_url = '{0}.azure-devices.net'.format(self.hub)
|
||||
config = {
|
||||
'base_url': self._base_url,
|
||||
'key': self.hub_policy_key,
|
||||
'policy': self.hub_policy_name
|
||||
}
|
||||
self._mgmt_client = self.get_data_svc_client(**config)
|
||||
|
||||
changed = False
|
||||
|
||||
device = self.get_device()
|
||||
if self.state == 'present':
|
||||
if not device:
|
||||
changed = True
|
||||
auth = {'type': _snake_to_camel(self.auth_method)}
|
||||
if self.auth_method == 'self_signed':
|
||||
auth['x509Thumbprint'] = {
|
||||
'primaryThumbprint': self.primary_key,
|
||||
'secondaryThumbprint': self.secondary_key
|
||||
}
|
||||
elif self.auth_method == 'sas':
|
||||
auth['symmetricKey'] = {
|
||||
'primaryKey': self.primary_key,
|
||||
'secondaryKey': self.secondary_key
|
||||
}
|
||||
device = {
|
||||
'deviceId': self.name,
|
||||
'capabilities': {'iotEdge': self.edge_enabled or False},
|
||||
'authentication': auth
|
||||
}
|
||||
if self.status is not None and not self.status:
|
||||
device['status'] = 'disabled'
|
||||
else:
|
||||
if self.edge_enabled is not None and self.edge_enabled != device['capabilities']['iotEdge']:
|
||||
changed = True
|
||||
device['capabilities']['iotEdge'] = self.edge_enabled
|
||||
if self.status is not None:
|
||||
status = 'enabled' if self.status else 'disabled'
|
||||
if status != device['status']:
|
||||
changed = True
|
||||
device['status'] = status
|
||||
if changed and not self.check_mode:
|
||||
device = self.create_or_update_device(device)
|
||||
twin = self.get_twin()
|
||||
if twin:
|
||||
if not twin.get('tags'):
|
||||
twin['tags'] = dict()
|
||||
twin_change = False
|
||||
if self.twin_tags and not self.is_equal(self.twin_tags, twin['tags']):
|
||||
twin_change = True
|
||||
if self.desired and not self.is_equal(self.desired, twin['properties']['desired']):
|
||||
twin_change = True
|
||||
if twin_change and not self.check_mode:
|
||||
self.update_twin(twin)
|
||||
changed = changed or twin_change
|
||||
device['tags'] = twin.get('tags') or dict()
|
||||
device['properties'] = twin['properties']
|
||||
device['modules'] = self.list_device_modules()
|
||||
elif self.twin_tags or self.desired:
|
||||
self.fail("Device twin is not supported in IoT Hub with basic tier.")
|
||||
elif device:
|
||||
if not self.check_mode:
|
||||
self.delete_device(device['etag'])
|
||||
changed = True
|
||||
device = None
|
||||
self.results = device or dict()
|
||||
self.results['changed'] = changed
|
||||
return self.results
|
||||
|
||||
def is_equal(self, updated, original):
|
||||
changed = False
|
||||
if not isinstance(updated, dict):
|
||||
self.fail('The Property or Tag should be a dict')
|
||||
for key in updated.keys():
|
||||
if re.search(r'[.|$|#|\s]', key):
|
||||
self.fail("Property or Tag name has invalid characters: '.', '$', '#' or ' '. Got '{0}'".format(key))
|
||||
original_value = original.get(key)
|
||||
updated_value = updated[key]
|
||||
if isinstance(updated_value, dict):
|
||||
if not isinstance(original_value, dict):
|
||||
changed = True
|
||||
original[key] = updated_value
|
||||
elif not self.is_equal(updated_value, original_value):
|
||||
changed = True
|
||||
elif original_value != updated_value:
|
||||
changed = True
|
||||
original[key] = updated_value
|
||||
return not changed
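    # Illustrative note, not part of the original module: is_equal() both compares
    # and merges, so unmatched keys from `updated` are copied into `original`.
    # With hypothetical twin tags:
    #   original = {'location': {'country': 'US'}, 'sensor': 'humidity'}
    #   updated = {'location': {'city': 'Redmond'}}
    #   self.is_equal(updated, original)   # returns False, and afterwards
    #   # original == {'location': {'country': 'US', 'city': 'Redmond'}, 'sensor': 'humidity'}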
def create_or_update_device(self, device):
|
||||
try:
|
||||
url = '/devices/{0}'.format(self.name)
|
||||
headers = copy.copy(self.header_parameters)
|
||||
if device.get('etag'):
|
||||
headers['If-Match'] = '"{0}"'.format(device['etag'])
|
||||
request = self._mgmt_client.put(url, self.query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=headers, content=device)
|
||||
if response.status_code not in [200, 201, 202]:
|
||||
raise CloudError(response)
|
||||
return json.loads(response.text)
|
||||
except Exception as exc:
|
||||
if exc.status_code in [403] and self.edge_enabled:
|
||||
self.fail('Edge device is not supported in IoT Hub with Basic tier.')
|
||||
else:
|
||||
self.fail('Error when creating or updating IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def delete_device(self, etag):
|
||||
try:
|
||||
url = '/devices/{0}'.format(self.name)
|
||||
headers = copy.copy(self.header_parameters)
|
||||
headers['If-Match'] = '"{0}"'.format(etag)
|
||||
request = self._mgmt_client.delete(url, self.query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=headers)
|
||||
if response.status_code not in [204]:
|
||||
raise CloudError(response)
|
||||
except Exception as exc:
|
||||
self.fail('Error when deleting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def get_device(self):
|
||||
try:
|
||||
url = '/devices/{0}'.format(self.name)
|
||||
device = self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
return device
|
||||
except Exception as exc:
|
||||
if exc.status_code in [404]:
|
||||
return None
|
||||
else:
|
||||
self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def get_twin(self):
|
||||
try:
|
||||
url = '/twins/{0}'.format(self.name)
|
||||
return self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
except Exception as exc:
|
||||
if exc.status_code in [403]:
|
||||
# The Basic SKU does not support device twins
|
||||
return None
|
||||
else:
|
||||
self.fail('Error when getting IoT Hub device {0} twin: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def update_twin(self, twin):
|
||||
try:
|
||||
url = '/twins/{0}'.format(self.name)
|
||||
headers = copy.copy(self.header_parameters)
|
||||
headers['If-Match'] = '"{0}"'.format(twin['etag'])
|
||||
request = self._mgmt_client.patch(url, self.query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=headers, content=twin)
|
||||
if response.status_code not in [200]:
|
||||
raise CloudError(response)
|
||||
return json.loads(response.text)
|
||||
except Exception as exc:
|
||||
self.fail('Error when creating or updating IoT Hub device twin {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def list_device_modules(self):
|
||||
try:
|
||||
url = '/devices/{0}/modules'.format(self.name)
|
||||
return self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
except Exception as exc:
|
||||
self.fail('Error when listing IoT Hub device {0} modules: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def _https_get(self, url, query_parameters, header_parameters):
|
||||
request = self._mgmt_client.get(url, query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=header_parameters, content=None)
|
||||
if response.status_code not in [200]:
|
||||
raise CloudError(response)
|
||||
return json.loads(response.text)
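    # Illustrative note, not part of the original module: the write helpers above
    # (create_or_update_device, delete_device, update_twin) rely on optimistic
    # concurrency, echoing the last-seen etag in an If-Match header so a conflicting
    # change made elsewhere surfaces as an HTTP error instead of being silently
    # overwritten:
    #   headers['If-Match'] = '"{0}"'.format(device['etag'])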
def main():
|
||||
AzureRMIoTDevice()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,313 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_iotdevice_info
|
||||
version_added: "2.9"
|
||||
short_description: Get facts about Azure IoT hub devices
|
||||
description:
|
||||
- Query and get Azure IoT hub devices.
|
||||
options:
|
||||
hub:
|
||||
description:
|
||||
- Name of IoT Hub.
|
||||
type: str
|
||||
required: true
|
||||
hub_policy_name:
|
||||
description:
|
||||
- Name of the IoT hub shared access policy that will be used to query the IoT hub.
|
||||
- This policy should have at least 'Registry Read' access.
|
||||
type: str
|
||||
required: true
|
||||
hub_policy_key:
|
||||
description:
|
||||
- Key of the I(hub_policy_name).
|
||||
type: str
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the IoT hub device identity.
|
||||
type: str
|
||||
aliases:
|
||||
- device_id
|
||||
module_id:
|
||||
description:
|
||||
- Name of the IoT hub device module.
|
||||
- Must be used together with I(name) (the device ID).
|
||||
type: str
|
||||
query:
|
||||
description:
|
||||
- Query an IoT hub to retrieve information regarding device twins using a SQL-like language.
|
||||
- "See U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-query-language)."
|
||||
type: str
|
||||
top:
|
||||
description:
|
||||
- Used when I(name) is not defined.
|
||||
- List the top n devices in the query.
|
||||
type: int
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get the details of a device
|
||||
azure_rm_iotdevice_info:
|
||||
name: Testing
|
||||
hub: MyIoTHub
|
||||
hub_policy_name: registryRead
|
||||
hub_policy_key: XXXXXXXXXXXXXXXXXXXX
|
||||
|
||||
- name: Query all device modules in an IoT Hub
|
||||
azure_rm_iotdevice_info:
|
||||
query: "SELECT * FROM devices.modules"
|
||||
hub: MyIoTHub
|
||||
hub_policy_name: registryRead
|
||||
hub_policy_key: XXXXXXXXXXXXXXXXXXXX
|
||||
|
||||
- name: List all devices in an IoT Hub
|
||||
azure_rm_iotdevice_info:
|
||||
hub: MyIoTHub
|
||||
hub_policy_name: registryRead
|
||||
hub_policy_key: XXXXXXXXXXXXXXXXXXXX
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
iot_devices:
|
||||
description:
|
||||
- IoT Hub device.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"authentication": {
|
||||
"symmetricKey": {
|
||||
"primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
},
|
||||
"type": "sas",
|
||||
"x509Thumbprint": {
|
||||
"primaryThumbprint": null,
|
||||
"secondaryThumbprint": null
|
||||
}
|
||||
},
|
||||
"capabilities": {
|
||||
"iotEdge": false
|
||||
},
|
||||
"changed": true,
|
||||
"cloudToDeviceMessageCount": 0,
|
||||
"connectionState": "Disconnected",
|
||||
"connectionStateUpdatedTime": "0001-01-01T00:00:00",
|
||||
"deviceId": "Testing",
|
||||
"etag": "NzA2NjU2ODc=",
|
||||
"failed": false,
|
||||
"generationId": "636903014505613307",
|
||||
"lastActivityTime": "0001-01-01T00:00:00",
|
||||
"modules": [
|
||||
{
|
||||
"authentication": {
|
||||
"symmetricKey": {
|
||||
"primaryKey": "XXXXXXXXXXXXXXXXXXX",
|
||||
"secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
},
|
||||
"type": "sas",
|
||||
"x509Thumbprint": {
|
||||
"primaryThumbprint": null,
|
||||
"secondaryThumbprint": null
|
||||
}
|
||||
},
|
||||
"cloudToDeviceMessageCount": 0,
|
||||
"connectionState": "Disconnected",
|
||||
"connectionStateUpdatedTime": "0001-01-01T00:00:00",
|
||||
"deviceId": "testdevice",
|
||||
"etag": "MjgxOTE5ODE4",
|
||||
"generationId": "636903840872788074",
|
||||
"lastActivityTime": "0001-01-01T00:00:00",
|
||||
"managedBy": null,
|
||||
"moduleId": "test"
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"desired": {
|
||||
"$metadata": {
|
||||
"$lastUpdated": "2019-04-10T05:00:46.2702079Z",
|
||||
"$lastUpdatedVersion": 8,
|
||||
"period": {
|
||||
"$lastUpdated": "2019-04-10T05:00:46.2702079Z",
|
||||
"$lastUpdatedVersion": 8
|
||||
}
|
||||
},
|
||||
"$version": 1,
|
||||
"period": 100
|
||||
},
|
||||
"reported": {
|
||||
"$metadata": {
|
||||
"$lastUpdated": "2019-04-08T06:24:10.5613307Z"
|
||||
},
|
||||
"$version": 1
|
||||
}
|
||||
},
|
||||
"status": "enabled",
|
||||
"statusReason": null,
|
||||
"statusUpdatedTime": "0001-01-01T00:00:00",
|
||||
"tags": {
|
||||
"location": {
|
||||
"country": "us",
|
||||
"city": "Redmond"
|
||||
},
|
||||
"sensor": "humidity"
|
||||
}
|
||||
}
|
||||
''' # NOQA
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMIoTDeviceFacts(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(type='str', aliases=['device_id']),
|
||||
module_id=dict(type='str'),
|
||||
query=dict(type='str'),
|
||||
hub=dict(type='str', required=True),
|
||||
hub_policy_name=dict(type='str', required=True),
|
||||
hub_policy_key=dict(type='str', required=True),
|
||||
top=dict(type='int')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
iot_devices=[]
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.module_id = None
|
||||
self.hub = None
|
||||
self.hub_policy_name = None
|
||||
self.hub_policy_key = None
|
||||
self.top = None
|
||||
|
||||
self._mgmt_client = None
|
||||
self._base_url = None
|
||||
self.query_parameters = {
|
||||
'api-version': '2018-06-30'
|
||||
}
|
||||
self.header_parameters = {
|
||||
'Content-Type': 'application/json; charset=utf-8',
|
||||
'accept-language': 'en-US'
|
||||
}
|
||||
super(AzureRMIoTDeviceFacts, self).__init__(self.module_arg_spec, supports_check_mode=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec.keys():
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self._base_url = '{0}.azure-devices.net'.format(self.hub)
|
||||
config = {
|
||||
'base_url': self._base_url,
|
||||
'key': self.hub_policy_key,
|
||||
'policy': self.hub_policy_name
|
||||
}
|
||||
if self.top:
|
||||
self.query_parameters['top'] = self.top
|
||||
self._mgmt_client = self.get_data_svc_client(**config)
|
||||
|
||||
response = []
|
||||
if self.module_id:
|
||||
response = [self.get_device_module()]
|
||||
elif self.name:
|
||||
response = [self.get_device()]
|
||||
elif self.query:
|
||||
response = self.hub_query()
|
||||
else:
|
||||
response = self.list_devices()
|
||||
|
||||
self.results['iot_devices'] = response
|
||||
return self.results
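    # Illustrative summary, not part of the original module: the dispatch above
    # prefers module_id over name over query; `top` is added to the shared query
    # parameters, but it is only meaningful for the listing and query calls.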
def hub_query(self):
|
||||
try:
|
||||
url = '/devices/query'
|
||||
request = self._mgmt_client.post(url, self.query_parameters)
|
||||
query = {
|
||||
'query': self.query
|
||||
}
|
||||
response = self._mgmt_client.send(request=request, headers=self.header_parameters, content=query)
|
||||
if response.status_code not in [200]:
|
||||
raise CloudError(response)
|
||||
return json.loads(response.text)
|
||||
except Exception as exc:
|
||||
self.fail('Error when running query "{0}" in IoT Hub {1}: {2}'.format(self.query, self.hub, exc.message or str(exc)))
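    # Illustrative note, not part of the original module: hub_query() posts the raw
    # IoT Hub query language string to /devices/query, e.g. a hypothetical filter:
    #   query = "SELECT * FROM devices WHERE tags.location.city = 'Redmond'"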
def get_device(self):
|
||||
try:
|
||||
url = '/devices/{0}'.format(self.name)
|
||||
device = self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
device['modules'] = self.list_device_modules()
|
||||
return device
|
||||
except Exception as exc:
|
||||
self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def get_device_module(self):
|
||||
try:
|
||||
url = '/devices/{0}/modules/{1}'.format(self.name, self.module_id)
|
||||
return self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
except Exception as exc:
|
||||
self.fail('Error when getting IoT Hub device {0} module {1}: {2}'.format(self.name, self.module_id, exc.message or str(exc)))
|
||||
|
||||
def list_device_modules(self):
|
||||
try:
|
||||
url = '/devices/{0}/modules'.format(self.name)
|
||||
return self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
except Exception as exc:
|
||||
self.fail('Error when listing IoT Hub device {0} modules: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def list_devices(self):
|
||||
try:
|
||||
url = '/devices'
|
||||
return self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
except Exception as exc:
|
||||
self.fail('Error when listing IoT Hub devices in {0}: {1}'.format(self.hub, exc.message or str(exc)))
|
||||
|
||||
def _https_get(self, url, query_parameters, header_parameters):
|
||||
request = self._mgmt_client.get(url, query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=header_parameters, content=None)
|
||||
if response.status_code not in [200]:
|
||||
raise CloudError(response)
|
||||
return json.loads(response.text)
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMIoTDeviceFacts()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,378 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_iotdevicemodule
|
||||
version_added: "2.9"
|
||||
short_description: Manage Azure IoT hub device module
|
||||
description:
|
||||
- Create or delete an Azure IoT hub device module.
|
||||
options:
|
||||
hub:
|
||||
description:
|
||||
- Name of IoT Hub.
|
||||
type: str
|
||||
required: true
|
||||
hub_policy_name:
|
||||
description:
|
||||
- Name of the IoT hub shared access policy that will be used to query the IoT hub.
|
||||
- This policy should have at least 'Registry Read' access.
|
||||
type: str
|
||||
required: true
|
||||
hub_policy_key:
|
||||
description:
|
||||
- Key of the I(hub_policy_name).
|
||||
type: str
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the IoT hub device module.
|
||||
type: str
|
||||
required: true
|
||||
device:
|
||||
description:
|
||||
- Name of the device that the module is associated with.
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- State of the IoT hub device module. Use C(present) to create or update a module and C(absent) to delete it.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
auth_method:
|
||||
description:
|
||||
- The authorization type an entity is to be created with.
|
||||
type: str
|
||||
choices:
|
||||
- sas
|
||||
- certificate_authority
|
||||
- self_signed
|
||||
default: sas
|
||||
primary_key:
|
||||
description:
|
||||
- Explicit self-signed certificate thumbprint to use for primary key.
|
||||
- Explicit Shared Private Key to use for primary key.
|
||||
type: str
|
||||
aliases:
|
||||
- primary_thumbprint
|
||||
secondary_key:
|
||||
description:
|
||||
- Explicit self-signed certificate thumbprint to use for secondary key.
|
||||
- Explicit Shared Private Key to use for secondary key.
|
||||
type: str
|
||||
aliases:
|
||||
- secondary_thumbprint
|
||||
twin_tags:
|
||||
description:
|
||||
- A section that the solution back end can read from and write to.
|
||||
- Tags are not visible to device apps.
|
||||
- "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key."
|
||||
- List is not supported.
|
||||
type: dict
|
||||
desired:
|
||||
description:
|
||||
- Used along with reported properties to synchronize device configuration or conditions.
|
||||
- "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key."
|
||||
- List is not supported.
|
||||
type: dict
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create simplest Azure IoT Hub device module
|
||||
azure_rm_iotdevicemodule:
|
||||
hub: myHub
|
||||
name: Testing
|
||||
device: mydevice
|
||||
hub_policy_name: iothubowner
|
||||
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
|
||||
- name: Create Azure IoT Edge device module
|
||||
azure_rm_iotdevicemodule:
|
||||
hub: myHub
|
||||
device: mydevice
|
||||
name: Testing
|
||||
hub_policy_name: iothubowner
|
||||
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
|
||||
|
||||
- name: Create Azure IoT Hub device module with module twin properties and tag
|
||||
azure_rm_iotdevicemodule:
|
||||
hub: myHub
|
||||
name: Testing
|
||||
device: mydevice
|
||||
hub_policy_name: iothubowner
|
||||
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
twin_tags:
|
||||
location:
|
||||
country: US
|
||||
city: Redmond
|
||||
sensor: humidity
|
||||
desired:
|
||||
period: 100
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
module:
|
||||
description:
|
||||
- IoT Hub device module.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"authentication": {
|
||||
"symmetricKey": {
|
||||
"primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
},
|
||||
"type": "sas",
|
||||
"x509Thumbprint": {
|
||||
"primaryThumbprint": null,
|
||||
"secondaryThumbprint": null
|
||||
}
|
||||
},
|
||||
"cloudToDeviceMessageCount": 0,
|
||||
"connectionState": "Disconnected",
|
||||
"connectionStateUpdatedTime": "0001-01-01T00:00:00",
|
||||
"deviceId": "mydevice",
|
||||
"etag": "ODM2NjI3ODg=",
|
||||
"generationId": "636904759703045768",
|
||||
"lastActivityTime": "0001-01-01T00:00:00",
|
||||
"managedBy": null,
|
||||
"moduleId": "Testing"
|
||||
}
|
||||
''' # NOQA
|
||||
|
||||
import json
|
||||
import copy
|
||||
import re
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMIoTDeviceModule(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
hub_policy_name=dict(type='str', required=True),
|
||||
hub_policy_key=dict(type='str', required=True),
|
||||
hub=dict(type='str', required=True),
|
||||
device=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
twin_tags=dict(type='dict'),
|
||||
desired=dict(type='dict'),
|
||||
auth_method=dict(type='str', choices=['self_signed', 'sas', 'certificate_authority'], default='sas'),
|
||||
primary_key=dict(type='str', no_log=True, aliases=['primary_thumbprint']),
|
||||
secondary_key=dict(type='str', no_log=True, aliases=['secondary_thumbprint'])
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.hub = None
|
||||
self.device = None
|
||||
self.hub_policy_key = None
|
||||
self.hub_policy_name = None
|
||||
self.state = None
|
||||
self.twin_tags = None
|
||||
self.desired = None
|
||||
self.auth_method = None
|
||||
self.primary_key = None
|
||||
self.secondary_key = None
|
||||
|
||||
required_if = [
|
||||
['auth_method', 'self_signed', ['certificate_authority']]
|
||||
]
|
||||
|
||||
self._base_url = None
|
||||
self._mgmt_client = None
|
||||
self.query_parameters = {
|
||||
'api-version': '2018-06-30'
|
||||
}
|
||||
self.header_parameters = {
|
||||
'Content-Type': 'application/json; charset=utf-8',
|
||||
'accept-language': 'en-US'
|
||||
}
|
||||
super(AzureRMIoTDeviceModule, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec.keys():
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self._base_url = '{0}.azure-devices.net'.format(self.hub)
|
||||
config = {
|
||||
'base_url': self._base_url,
|
||||
'key': self.hub_policy_key,
|
||||
'policy': self.hub_policy_name
|
||||
}
|
||||
self._mgmt_client = self.get_data_svc_client(**config)
|
||||
|
||||
changed = False
|
||||
|
||||
module = self.get_module()
|
||||
if self.state == 'present':
|
||||
if not module:
|
||||
changed = True
|
||||
auth = {'type': _snake_to_camel(self.auth_method)}
|
||||
if self.auth_method == 'self_signed':
|
||||
auth['x509Thumbprint'] = {
|
||||
'primaryThumbprint': self.primary_key,
|
||||
'secondaryThumbprint': self.secondary_key
|
||||
}
|
||||
elif self.auth_method == 'sas':
|
||||
auth['symmetricKey'] = {
|
||||
'primaryKey': self.primary_key,
|
||||
'secondaryKey': self.secondary_key
|
||||
}
|
||||
module = {
|
||||
'deviceId': self.device,
|
||||
'moduleId': self.name,
|
||||
'authentication': auth
|
||||
}
|
||||
if changed and not self.check_mode:
|
||||
module = self.create_or_update_module(module)
|
||||
twin = self.get_twin()
|
||||
if not twin.get('tags'):
|
||||
twin['tags'] = dict()
|
||||
twin_change = False
|
||||
if self.twin_tags and not self.is_equal(self.twin_tags, twin['tags']):
|
||||
twin_change = True
|
||||
if self.desired and not self.is_equal(self.desired, twin['properties']['desired']):
|
||||
|
||||
twin_change = True
|
||||
if twin_change and not self.check_mode:
|
||||
twin = self.update_twin(twin)
|
||||
changed = changed or twin_change
|
||||
module['tags'] = twin.get('tags') or dict()
|
||||
module['properties'] = twin['properties']
|
||||
elif module:
|
||||
if not self.check_mode:
|
||||
self.delete_module(module['etag'])
|
||||
changed = True
|
||||
module = None
|
||||
self.results = module or dict()
|
||||
self.results['changed'] = changed
|
||||
return self.results
|
||||
|
||||
def is_equal(self, updated, original):
|
||||
changed = False
|
||||
if not isinstance(updated, dict):
|
||||
self.fail('The Property or Tag should be a dict')
|
||||
for key in updated.keys():
|
||||
if re.search(r'[.|$|#|\s]', key):
|
||||
self.fail("Property or Tag name has invalid characters: '.', '$', '#' or ' '. Got '{0}'".format(key))
|
||||
original_value = original.get(key)
|
||||
updated_value = updated[key]
|
||||
if isinstance(updated_value, dict):
|
||||
if not isinstance(original_value, dict):
|
||||
changed = True
|
||||
original[key] = updated_value
|
||||
elif not self.is_equal(updated_value, original_value):
|
||||
changed = True
|
||||
elif original_value != updated_value:
|
||||
changed = True
|
||||
original[key] = updated_value
|
||||
return not changed
|
||||
|
||||
def create_or_update_module(self, module):
|
||||
try:
|
||||
url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
|
||||
headers = copy.copy(self.header_parameters)
|
||||
if module.get('etag'):
|
||||
headers['If-Match'] = '"{0}"'.format(module['etag'])
|
||||
request = self._mgmt_client.put(url, self.query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=headers, content=module)
|
||||
if response.status_code not in [200, 201]:
|
||||
raise CloudError(response)
|
||||
return json.loads(response.text)
|
||||
except Exception as exc:
|
||||
self.fail('Error when creating or updating IoT Hub device module {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def delete_module(self, etag):
|
||||
try:
|
||||
url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
|
||||
headers = copy.copy(self.header_parameters)
|
||||
headers['If-Match'] = '"{0}"'.format(etag)
|
||||
request = self._mgmt_client.delete(url, self.query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=headers)
|
||||
if response.status_code not in [204]:
|
||||
raise CloudError(response)
|
||||
except Exception as exc:
|
||||
self.fail('Error when deleting IoT Hub device module {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def get_module(self):
|
||||
try:
|
||||
url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
|
||||
return self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def get_twin(self):
|
||||
try:
|
||||
url = '/twins/{0}/modules/{1}'.format(self.device, self.name)
|
||||
return self._https_get(url, self.query_parameters, self.header_parameters)
|
||||
except Exception as exc:
|
||||
self.fail('Error when getting IoT Hub device {0} module twin {1}: {2}'.format(self.device, self.name, exc.message or str(exc)))
|
||||
|
||||
def update_twin(self, twin):
|
||||
try:
|
||||
url = '/twins/{0}/modules/{1}'.format(self.device, self.name)
|
||||
headers = copy.copy(self.header_parameters)
|
||||
headers['If-Match'] = twin['etag']
|
||||
request = self._mgmt_client.patch(url, self.query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=headers, content=twin)
|
||||
if response.status_code not in [200]:
|
||||
raise CloudError(response)
|
||||
return json.loads(response.text)
|
||||
except Exception as exc:
|
||||
self.fail('Error when creating or updating IoT Hub device {0} module twin {1}: {2}'.format(self.device, self.name, exc.message or str(exc)))
|
||||
|
||||
def _https_get(self, url, query_parameters, header_parameters):
|
||||
request = self._mgmt_client.get(url, query_parameters)
|
||||
response = self._mgmt_client.send(request=request, headers=header_parameters, content=None)
|
||||
if response.status_code not in [200]:
|
||||
raise CloudError(response)
|
||||
return json.loads(response.text)
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMIoTDeviceModule()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,895 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_iothub
|
||||
version_added: "2.9"
|
||||
short_description: Manage Azure IoT hub
|
||||
description:
|
||||
- Create or delete an Azure IoT hub.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
type: str
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the IoT hub.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- State of the IoT hub. Use C(present) to create or update an IoT hub and C(absent) to delete an IoT hub.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
location:
|
||||
description:
|
||||
- Location of the IoT hub.
|
||||
type: str
|
||||
sku:
|
||||
description:
|
||||
- Pricing tier for Azure IoT Hub.
|
||||
- Note that only one free IoT hub instance (F1) is allowed in each subscription; an exception is raised if a second one is created.
- Defaults to C(s1) on creation.
|
||||
type: str
|
||||
choices:
|
||||
- b1
|
||||
- b2
|
||||
- b3
|
||||
- f1
|
||||
- s1
|
||||
- s2
|
||||
- s3
|
||||
unit:
|
||||
description:
|
||||
- Number of provisioned units in the IoT hub.
|
||||
- Default is C(1).
|
||||
type: int
|
||||
event_endpoint:
|
||||
description:
|
||||
- The Event Hub-compatible endpoint property.
|
||||
type: dict
|
||||
suboptions:
|
||||
partition_count:
|
||||
description:
|
||||
- The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
|
||||
- Default is C(2).
|
||||
type: int
|
||||
retention_time_in_days:
|
||||
description:
|
||||
- The retention time for device-to-cloud messages in days.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
|
||||
- Default is C(1).
|
||||
type: int
|
||||
enable_file_upload_notifications:
|
||||
description:
|
||||
- File upload notifications are enabled if set to C(True).
|
||||
type: bool
|
||||
ip_filters:
|
||||
description:
|
||||
- Configure rules for rejecting or accepting traffic from specific IPv4 addresses.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Name of the filter.
|
||||
type: str
|
||||
required: yes
|
||||
ip_mask:
|
||||
description:
|
||||
- A string that contains the IP address range in CIDR notation for the rule.
|
||||
type: str
|
||||
required: yes
|
||||
action:
|
||||
description:
|
||||
- The desired action for requests captured by this rule.
|
||||
type: str
|
||||
required: yes
|
||||
choices:
|
||||
- accept
|
||||
- reject
|
||||
routing_endpoints:
|
||||
description:
|
||||
- Custom endpoints.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
required: yes
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
- Default is the same as I(resource_group).
|
||||
type: str
|
||||
subscription:
|
||||
description:
|
||||
- Subscription id of the endpoint.
|
||||
- Default is the same as I(subscription).
|
||||
type: str
|
||||
resource_type:
|
||||
description:
|
||||
- Resource type of the custom endpoint.
|
||||
type: str
|
||||
choices:
|
||||
- eventhub
|
||||
- queue
|
||||
- storage
|
||||
- topic
|
||||
required: yes
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
- The connection string should have send privilege.
|
||||
type: str
|
||||
required: yes
|
||||
container:
|
||||
description:
|
||||
- Container name of the custom endpoint when I(resource_type=storage).
|
||||
type: str
|
||||
encoding:
|
||||
description:
|
||||
- Encoding of the message when I(resource_type=storage).
|
||||
type: str
|
||||
routes:
|
||||
description:
|
||||
- Route device-to-cloud messages to service-facing endpoints.
|
||||
type: list
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Name of the route.
|
||||
type: str
|
||||
required: yes
|
||||
source:
|
||||
description:
|
||||
- The origin of the data stream to be acted upon.
|
||||
type: str
|
||||
choices:
|
||||
- device_messages
|
||||
- twin_change_events
|
||||
- device_lifecycle_events
|
||||
- device_job_lifecycle_events
|
||||
required: yes
|
||||
enabled:
|
||||
description:
|
||||
- Whether to enable the route.
|
||||
type: bool
|
||||
required: yes
|
||||
endpoint_name:
|
||||
description:
|
||||
- The name of the endpoint in I(routing_endpoints) where IoT Hub sends messages that match the query.
|
||||
type: str
|
||||
required: yes
|
||||
condition:
|
||||
description:
|
||||
- "The query expression for the routing query that is run against the message application properties,
|
||||
system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint."
|
||||
- "For more information about constructing a query,
|
||||
see U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)"
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a simplest IoT hub
|
||||
azure_rm_iothub:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
- name: Create an IoT hub with route
|
||||
azure_rm_iothub:
|
||||
resource_group: myResourceGroup
|
||||
name: Testing
|
||||
routing_endpoints:
|
||||
- connection_string: "Endpoint=sb://qux.servicebus.windows.net/;SharedAccessKeyName=quux;SharedAccessKey=****;EntityPath=myQueue"
|
||||
name: foo
|
||||
resource_type: queue
|
||||
resource_group: myResourceGroup1
|
||||
routes:
|
||||
- name: bar
|
||||
source: device_messages
|
||||
endpoint_name: foo
|
||||
enabled: yes
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID of the IoT hub.
|
||||
sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup/providers/Microsoft.Devices/IotHubs/Testing"
|
||||
returned: success
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the IoT hub.
|
||||
sample: Testing
|
||||
returned: success
|
||||
type: str
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the IoT hub.
|
||||
sample: myResourceGroup.
|
||||
returned: success
|
||||
type: str
|
||||
location:
|
||||
description:
|
||||
- Location of the IoT hub.
|
||||
sample: eastus
|
||||
returned: success
|
||||
type: str
|
||||
unit:
|
||||
description:
|
||||
- Units in the IoT Hub.
|
||||
sample: 1
|
||||
returned: success
|
||||
type: int
|
||||
sku:
|
||||
description:
|
||||
- Pricing tier for Azure IoT Hub.
|
||||
sample: f1
|
||||
returned: success
|
||||
type: str
|
||||
cloud_to_device:
|
||||
description:
|
||||
- Cloud to device message properties.
|
||||
contains:
|
||||
max_delivery_count:
|
||||
description:
|
||||
- The number of times the IoT hub attempts to deliver a message on the feedback queue.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)."
|
||||
type: int
|
||||
returned: success
|
||||
sample: 10
|
||||
ttl_as_iso8601:
|
||||
description:
|
||||
- The period of time for which a message is available to consume before it is expired by the IoT hub.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)."
|
||||
type: str
|
||||
returned: success
|
||||
sample: "1:00:00"
|
||||
returned: success
|
||||
type: complex
|
||||
enable_file_upload_notifications:
|
||||
description:
|
||||
- Whether file upload notifications are enabled.
|
||||
sample: True
|
||||
returned: success
|
||||
type: bool
|
||||
event_endpoints:
|
||||
description:
|
||||
- Built-in endpoint where device messages are delivered.
|
||||
contains:
|
||||
endpoint:
|
||||
description:
|
||||
- The Event Hub-compatible endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "sb://iothub-ns-testing-1478811-9bbc4a15f0.servicebus.windows.net/"
|
||||
partition_count:
|
||||
description:
|
||||
- The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
|
||||
type: int
|
||||
returned: success
|
||||
sample: 2
|
||||
retention_time_in_days:
|
||||
description:
|
||||
- The retention time for device-to-cloud messages in days.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
|
||||
type: int
|
||||
returned: success
|
||||
sample: 1
|
||||
partition_ids:
|
||||
description:
|
||||
- List of partition IDs for the event endpoint.
|
||||
type: list
|
||||
returned: success
|
||||
sample: ["0", "1"]
|
||||
returned: success
|
||||
type: complex
|
||||
host_name:
|
||||
description:
|
||||
- Host of the IoT hub.
|
||||
sample: "testing.azure-devices.net"
|
||||
returned: success
|
||||
type: str
|
||||
ip_filters:
|
||||
description:
|
||||
- Configure rules for rejecting or accepting traffic from specific IPv4 addresses.
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the filter.
|
||||
type: str
|
||||
returned: success
|
||||
sample: filter
|
||||
ip_mask:
|
||||
description:
|
||||
- A string that contains the IP address range in CIDR notation for the rule.
|
||||
type: str
|
||||
returned: success
|
||||
sample: 40.54.7.3
|
||||
action:
|
||||
description:
|
||||
- The desired action for requests captured by this rule.
|
||||
type: str
|
||||
returned: success
|
||||
sample: Reject
|
||||
returned: success
|
||||
type: complex
|
||||
routing_endpoints:
|
||||
description:
|
||||
- Custom endpoints.
|
||||
contains:
|
||||
event_hubs:
|
||||
description:
|
||||
- List of custom endpoints of event hubs.
|
||||
type: complex
|
||||
returned: success
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: foo
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: bar
|
||||
subscription:
|
||||
description:
|
||||
- Subscription id of the endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
|
||||
service_bus_queues:
|
||||
description:
|
||||
- List of custom endpoints of service bus queue.
|
||||
type: complex
|
||||
returned: success
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: foo
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: bar
|
||||
subscription:
|
||||
description:
|
||||
- Subscription ID of the endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
|
||||
service_bus_topics:
|
||||
description:
|
||||
- List of custom endpoints of service bus topic.
|
||||
type: complex
|
||||
returned: success
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: foo
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: bar
|
||||
subscription:
|
||||
description:
|
||||
- Subscription ID of the endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
|
||||
storage_containers:
|
||||
description:
|
||||
- List of custom endpoints of storage.
|
||||
type: complex
|
||||
returned: success
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: foo
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: bar
|
||||
subscription:
|
||||
description:
|
||||
- Subscription ID of the endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
type: str
|
||||
returned: success
|
||||
sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
|
||||
returned: success
|
||||
type: complex
|
||||
routes:
|
||||
description:
|
||||
- Route device-to-cloud messages to service-facing endpoints.
|
||||
type: complex
|
||||
returned: success
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the route.
|
||||
type: str
|
||||
returned: success
|
||||
sample: route1
|
||||
source:
|
||||
description:
|
||||
- The origin of the data stream to be acted upon.
|
||||
type: str
|
||||
returned: success
|
||||
sample: device_messages
|
||||
enabled:
|
||||
description:
|
||||
- Whether to enable the route.
|
||||
type: bool
|
||||
returned: success
|
||||
sample: true
|
||||
endpoint_name:
|
||||
description:
|
||||
- The name of the endpoint in C(routing_endpoints) where IoT Hub sends messages that match the query.
|
||||
type: str
|
||||
returned: success
|
||||
sample: foo
|
||||
condition:
|
||||
description:
|
||||
- "The query expression for the routing query that is run against the message application properties,
|
||||
system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint."
|
||||
- "For more information about constructing a query,
|
||||
see U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)"
|
||||
type: str
|
||||
returned: success
|
||||
sample: "true"
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
|
||||
import re
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
ip_filter_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
ip_mask=dict(type='str', required=True),
|
||||
action=dict(type='str', required=True, choices=['accept', 'reject'])
|
||||
)
|
||||
|
||||
|
||||
routing_endpoints_spec = dict(
|
||||
connection_string=dict(type='str', required=True),
|
||||
name=dict(type='str', required=True),
|
||||
resource_group=dict(type='str'),
|
||||
subscription=dict(type='str'),
|
||||
resource_type=dict(type='str', required=True, choices=['eventhub', 'queue', 'storage', 'topic']),
|
||||
container=dict(type='str'),
|
||||
encoding=dict(type='str')
|
||||
)
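# The mapping below ties each user-facing resource_type value to the SDK model class used to
# build the endpoint and to the attribute of RoutingEndpoints that stores endpoints of that type.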
|
||||
|
||||
|
||||
routing_endpoints_resource_type_mapping = {
|
||||
'eventhub': {'model': 'RoutingEventHubProperties', 'attribute': 'event_hubs'},
|
||||
'queue': {'model': 'RoutingServiceBusQueueEndpointProperties', 'attribute': 'service_bus_queues'},
|
||||
'topic': {'model': 'RoutingServiceBusTopicEndpointProperties', 'attribute': 'service_bus_topics'},
|
||||
'storage': {'model': 'RoutingStorageContainerProperties', 'attribute': 'storage_containers'}
|
||||
}
|
||||
|
||||
|
||||
routes_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
source=dict(type='str', required=True, choices=['device_messages', 'twin_change_events', 'device_lifecycle_events', 'device_job_lifecycle_events']),
|
||||
enabled=dict(type='bool', required=True),
|
||||
endpoint_name=dict(type='str', required=True),
|
||||
condition=dict(type='str')
|
||||
)
|
||||
|
||||
|
||||
event_endpoint_spec = dict(
|
||||
partition_count=dict(type='int'),
|
||||
retention_time_in_days=dict(type='int')
|
||||
)
|
||||
|
||||
|
||||
class AzureRMIoTHub(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
location=dict(type='str'),
|
||||
sku=dict(type='str', choices=['b1', 'b2', 'b3', 'f1', 's1', 's2', 's3']),
|
||||
unit=dict(type='int'),
|
||||
event_endpoint=dict(type='dict', options=event_endpoint_spec),
|
||||
enable_file_upload_notifications=dict(type='bool'),
|
||||
ip_filters=dict(type='list', elements='dict', options=ip_filter_spec),
|
||||
routing_endpoints=dict(type='list', elements='dict', options=routing_endpoints_spec),
|
||||
routes=dict(type='list', elements='dict', options=routes_spec)
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.location = None
|
||||
self.sku = None
|
||||
self.unit = None
|
||||
self.event_endpoint = None
|
||||
self.tags = None
|
||||
self.enable_file_upload_notifications = None
|
||||
self.ip_filters = None
|
||||
self.routing_endpoints = None
|
||||
self.routes = None
|
||||
|
||||
super(AzureRMIoTHub, self).__init__(self.module_arg_spec, supports_check_mode=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
changed = False
|
||||
|
||||
if not self.location:
|
||||
# Set default location
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
self.location = resource_group.location
|
||||
self.sku = str.capitalize(self.sku) if self.sku else None
|
||||
iothub = self.get_hub()
|
||||
if self.state == 'present':
|
||||
if not iothub:
|
||||
changed = True
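# The hub does not exist yet: fall back to the defaults applied below
# (S1 SKU, 1 unit, 2 partitions, 1 day retention) before building the create request.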
|
||||
self.sku = self.sku or 'S1'
|
||||
self.unit = self.unit or 1
|
||||
self.event_endpoint = self.event_endpoint or {}
|
||||
self.event_endpoint['partition_count'] = self.event_endpoint.get('partition_count') or 2
|
||||
self.event_endpoint['retention_time_in_days'] = self.event_endpoint.get('retention_time_in_days') or 1
|
||||
event_hub_properties = dict()
|
||||
event_hub_properties['events'] = self.IoThub_models.EventHubProperties(**self.event_endpoint)
|
||||
iothub_property = self.IoThub_models.IotHubProperties(event_hub_endpoints=event_hub_properties)
|
||||
if self.enable_file_upload_notifications:
|
||||
iothub_property.enable_file_upload_notifications = self.enable_file_upload_notifications
|
||||
if self.ip_filters:
|
||||
iothub_property.ip_filter_rules = self.construct_ip_filters()
|
||||
routing_endpoints = None
|
||||
routes = None
|
||||
if self.routing_endpoints:
|
||||
routing_endpoints = self.construct_routing_endpoint(self.routing_endpoints)
|
||||
if self.routes:
|
||||
routes = [self.construct_route(x) for x in self.routes]
|
||||
if routes or routing_endpoints:
|
||||
routing_property = self.IoThub_models.RoutingProperties(endpoints=routing_endpoints,
|
||||
routes=routes)
|
||||
iothub_property.routing = routing_property
|
||||
iothub = self.IoThub_models.IotHubDescription(location=self.location,
|
||||
sku=self.IoThub_models.IotHubSkuInfo(name=self.sku, capacity=self.unit),
|
||||
properties=iothub_property,
|
||||
tags=self.tags)
|
||||
if not self.check_mode:
|
||||
iothub = self.create_or_update_hub(iothub)
|
||||
else:
|
||||
# compare sku
|
||||
original_sku = iothub.sku
|
||||
if self.sku and self.sku != original_sku.name:
|
||||
self.log('SKU changed')
|
||||
iothub.sku.name = self.sku
|
||||
changed = True
|
||||
if self.unit and self.unit != original_sku.capacity:
|
||||
self.log('Unit count changed')
|
||||
iothub.sku.capacity = self.unit
|
||||
changed = True
|
||||
# compare event hub property
|
||||
event_hub = iothub.properties.event_hub_endpoints or dict()
|
||||
if self.event_endpoint:
|
||||
item = self.event_endpoint
|
||||
original_item = event_hub.get('events')
|
||||
if not original_item:
|
||||
changed = True
|
||||
event_hub['events'] = self.IoThub_models.EventHubProperties(partition_count=item.get('partition_count') or 2,
|
||||
retention_time_in_days=item.get('retention_time_in_days') or 1)
|
||||
elif item.get('partition_count') and original_item.partition_count != item['partition_count']:
|
||||
changed = True
|
||||
original_item.partition_count = item['partition_count']
|
||||
elif item.get('retention_time_in_days') and original_item.retention_time_in_days != item['retention_time_in_days']:
|
||||
changed = True
|
||||
original_item.retention_time_in_days = item['retention_time_in_days']
|
||||
# compare endpoint
|
||||
original_endpoints = iothub.properties.routing.endpoints
|
||||
endpoint_changed = False
|
||||
if self.routing_endpoints:
|
||||
# find the total length
|
||||
total_length = 0
|
||||
for item in routing_endpoints_resource_type_mapping.values():
|
||||
attribute = item['attribute']
|
||||
array = getattr(original_endpoints, attribute)
|
||||
total_length += len(array or [])
|
||||
if total_length != len(self.routing_endpoints):
|
||||
endpoint_changed = True
|
||||
else: # If already changed, no need to compare any more
|
||||
for item in self.routing_endpoints:
|
||||
if not self.lookup_endpoint(item, original_endpoints):
|
||||
endpoint_changed = True
|
||||
break
|
||||
if endpoint_changed:
|
||||
iothub.properties.routing.endpoints = self.construct_routing_endpoint(self.routing_endpoints)
|
||||
changed = True
|
||||
# compare routes
|
||||
original_routes = iothub.properties.routing.routes
|
||||
routes_changed = False
|
||||
if self.routes:
|
||||
if len(self.routes) != len(original_routes or []):
|
||||
routes_changed = True
|
||||
else:
|
||||
for item in self.routes:
|
||||
if not self.lookup_route(item, original_routes):
|
||||
routes_changed = True
|
||||
break
|
||||
if routes_changed:
|
||||
changed = True
|
||||
iothub.properties.routing.routes = [self.construct_route(x) for x in self.routes]
|
||||
# compare IP filter
|
||||
ip_filter_changed = False
|
||||
original_ip_filter = iothub.properties.ip_filter_rules
|
||||
if self.ip_filters:
|
||||
if len(self.ip_filters) != len(original_ip_filter or []):
|
||||
ip_filter_changed = True
|
||||
else:
|
||||
for item in self.ip_filters:
|
||||
if not self.lookup_ip_filter(item, original_ip_filter):
|
||||
ip_filter_changed = True
|
||||
break
|
||||
if ip_filter_changed:
|
||||
changed = True
|
||||
iothub.properties.ip_filter_rules = self.construct_ip_filters()
|
||||
|
||||
# compare tags
|
||||
tag_changed, updated_tags = self.update_tags(iothub.tags)
|
||||
iothub.tags = updated_tags
|
||||
if changed and not self.check_mode:
|
||||
iothub = self.create_or_update_hub(iothub)
|
||||
# only tags changed
|
||||
if not changed and tag_changed:
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
iothub = self.update_instance_tags(updated_tags)
|
||||
self.results = self.to_dict(iothub)
|
||||
elif iothub:
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
self.delete_hub()
|
||||
self.results['changed'] = changed
|
||||
return self.results
|
||||
|
||||
def lookup_ip_filter(self, target, ip_filters):
|
||||
if not ip_filters or len(ip_filters) == 0:
|
||||
return False
|
||||
for item in ip_filters:
|
||||
if item.filter_name == target['name']:
|
||||
if item.ip_mask != target['ip_mask']:
|
||||
return False
|
||||
if item.action.lower() != target['action']:
|
||||
return False
|
||||
return True
|
||||
return False
|
||||
|
||||
def lookup_route(self, target, routes):
|
||||
if not routes or len(routes) == 0:
|
||||
return False
|
||||
for item in routes:
|
||||
if item.name == target['name']:
|
||||
if target['source'] != _camel_to_snake(item.source):
|
||||
return False
|
||||
if target['enabled'] != item.is_enabled:
|
||||
return False
|
||||
if target['endpoint_name'] != item.endpoint_names[0]:
|
||||
return False
|
||||
if target.get('condition') and target['condition'] != item.condition:
|
||||
return False
|
||||
return True
|
||||
return False
|
||||
|
||||
def lookup_endpoint(self, target, routing_endpoints):
|
||||
resource_type = target['resource_type']
|
||||
attribute = routing_endpoints_resource_type_mapping[resource_type]['attribute']
|
||||
endpoints = getattr(routing_endpoints, attribute)
|
||||
if not endpoints or len(endpoints) == 0:
|
||||
return False
|
||||
for item in endpoints:
|
||||
if item.name == target['name']:
|
||||
if target.get('resource_group') and target['resource_group'] != (item.resource_group or self.resource_group):
|
||||
return False
|
||||
if target.get('subscription') and target['subscription'] != (item.subscription_id or self.subscription_id):
|
||||
return False
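# The connection string stored on the hub masks the shared access key as '****' and may
# include the AMQP port, so compare against the user-supplied string with a relaxed regex.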
|
||||
connection_string_regex = item.connection_string.replace('****', '.*')
|
||||
connection_string_regex = re.sub(r':\d+/;', '/;', connection_string_regex)
|
||||
if not re.search(connection_string_regex, target['connection_string']):
|
||||
return False
|
||||
if resource_type == 'storage':
|
||||
if target.get('container') and item.container_name != target['container']:
|
||||
return False
|
||||
if target.get('encoding') and item.encoding != target['encoding']:
|
||||
return False
|
||||
return True
|
||||
return False
|
||||
|
||||
def construct_ip_filters(self):
|
||||
return [self.IoThub_models.IpFilterRule(filter_name=x['name'],
|
||||
action=self.IoThub_models.IpFilterActionType[x['action']],
|
||||
ip_mask=x['ip_mask']) for x in self.ip_filters]
|
||||
|
||||
def construct_routing_endpoint(self, routing_endpoints):
|
||||
if not routing_endpoints or len(routing_endpoints) == 0:
|
||||
return None
|
||||
result = self.IoThub_models.RoutingEndpoints()
|
||||
for endpoint in routing_endpoints:
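# Instantiate the SDK model that matches this endpoint's resource_type and append it
# to the corresponding attribute list on the RoutingEndpoints result.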
|
||||
resource_type_property = routing_endpoints_resource_type_mapping.get(endpoint['resource_type'])
|
||||
resource_type = getattr(self.IoThub_models, resource_type_property['model'])
|
||||
array = getattr(result, resource_type_property['attribute']) or []
|
||||
array.append(resource_type(**endpoint))
|
||||
setattr(result, resource_type_property['attribute'], array)
|
||||
return result
|
||||
|
||||
def construct_route(self, route):
|
||||
if not route:
|
||||
return None
|
||||
return self.IoThub_models.RouteProperties(name=route['name'],
|
||||
source=_snake_to_camel(snake=route['source'], capitalize_first=True),
|
||||
is_enabled=route['enabled'],
|
||||
endpoint_names=[route['endpoint_name']],
|
||||
condition=route.get('condition'))
|
||||
|
||||
def get_hub(self):
|
||||
try:
|
||||
return self.IoThub_client.iot_hub_resource.get(self.resource_group, self.name)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def create_or_update_hub(self, hub):
|
||||
try:
|
||||
poller = self.IoThub_client.iot_hub_resource.create_or_update(self.resource_group, self.name, hub, if_match=hub.etag)
|
||||
return self.get_poller_result(poller)
|
||||
except Exception as exc:
|
||||
self.fail('Error creating or updating IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def update_instance_tags(self, tags):
|
||||
try:
|
||||
poller = self.IoThub_client.iot_hub_resource.update(self.resource_group, self.name, tags=tags)
|
||||
return self.get_poller_result(poller)
|
||||
except Exception as exc:
|
||||
self.fail('Error updating IoT Hub {0}\'s tag: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def delete_hub(self):
|
||||
try:
|
||||
self.IoThub_client.iot_hub_resource.delete(self.resource_group, self.name)
|
||||
return True
|
||||
except Exception as exc:
|
||||
self.fail('Error deleting IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
return False
|
||||
|
||||
def route_to_dict(self, route):
|
||||
return dict(
|
||||
name=route.name,
|
||||
source=_camel_to_snake(route.source),
|
||||
endpoint_name=route.endpoint_names[0],
|
||||
enabled=route.is_enabled,
|
||||
condition=route.condition
|
||||
)
|
||||
|
||||
def instance_dict_to_dict(self, instance_dict):
|
||||
result = dict()
|
||||
if not instance_dict:
|
||||
return result
|
||||
for key in instance_dict.keys():
|
||||
result[key] = instance_dict[key].as_dict()
|
||||
return result
|
||||
|
||||
def to_dict(self, hub):
|
||||
result = dict()
|
||||
properties = hub.properties
|
||||
result['id'] = hub.id
|
||||
result['name'] = hub.name
|
||||
result['resource_group'] = self.resource_group
|
||||
result['location'] = hub.location
|
||||
result['tags'] = hub.tags
|
||||
result['unit'] = hub.sku.capacity
|
||||
result['sku'] = hub.sku.name.lower()
|
||||
result['cloud_to_device'] = dict(
|
||||
max_delivery_count=properties.cloud_to_device.feedback.max_delivery_count,
|
||||
ttl_as_iso8601=str(properties.cloud_to_device.feedback.ttl_as_iso8601)
|
||||
) if properties.cloud_to_device else dict()
|
||||
result['enable_file_upload_notifications'] = properties.enable_file_upload_notifications
|
||||
result['event_endpoint'] = properties.event_hub_endpoints.get('events').as_dict() if properties.event_hub_endpoints.get('events') else None
|
||||
result['host_name'] = properties.host_name
|
||||
result['ip_filters'] = [x.as_dict() for x in properties.ip_filter_rules]
|
||||
if properties.routing:
|
||||
result['routing_endpoints'] = properties.routing.endpoints.as_dict()
|
||||
result['routes'] = [self.route_to_dict(x) for x in properties.routing.routes]
|
||||
result['fallback_route'] = self.route_to_dict(properties.routing.fallback_route)
|
||||
result['status'] = properties.state
|
||||
result['storage_endpoints'] = self.instance_dict_to_dict(properties.storage_endpoints)
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMIoTHub()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,618 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_iothub_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get IoT Hub facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific IoT Hub or all IoT Hubs.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Limit results to a specific IoT Hub.
|
||||
type: str
|
||||
resource_group:
|
||||
description:
|
||||
- The resource group to search for the desired IoT Hub.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
show_stats:
|
||||
description:
|
||||
- Show the statistics for IoT Hub.
|
||||
- Note this will have network overhead for each IoT Hub.
|
||||
type: bool
|
||||
show_quota_metrics:
|
||||
description:
|
||||
- Get the quota metrics for an IoT hub.
|
||||
- Note this will have network overhead for each IoT Hub.
|
||||
type: bool
|
||||
show_endpoint_health:
|
||||
description:
|
||||
- Get the health for routing endpoints.
|
||||
- Note this will have network overhead for each IoT Hub.
|
||||
type: bool
|
||||
test_route_message:
|
||||
description:
|
||||
- Message used for testing routes. It will be run against all routes.
|
||||
type: str
|
||||
list_consumer_groups:
|
||||
description:
|
||||
- List the consumer groups of the built-in event hub.
|
||||
type: bool
|
||||
list_keys:
|
||||
description:
|
||||
- List the keys of IoT Hub.
|
||||
- Note this will have network overhead for each IoT Hub.
|
||||
type: bool
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for one IoT Hub
|
||||
azure_rm_iothub_info:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts for all IoT Hubs
|
||||
azure_rm_iothub_info:
|
||||
|
||||
- name: Get facts for all IoT Hubs in a specific resource group
|
||||
azure_rm_iothub_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts by tags
|
||||
azure_rm_iothub_info:
|
||||
tags:
|
||||
- testing
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_iothubs:
|
||||
description:
|
||||
- List of IoT Hub dicts.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID of the IoT hub.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup/providers/Microsoft.Devices/IotHubs/Testing"
|
||||
name:
|
||||
description:
|
||||
- Name of the IoT hub.
|
||||
type: str
|
||||
returned: always
|
||||
sample: Testing
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the IoT hub.
|
||||
type: str
|
||||
returned: always
|
||||
sample: myResourceGroup
|
||||
location:
|
||||
description:
|
||||
- Location of the IoT hub.
|
||||
type: str
|
||||
returned: always
|
||||
sample: eastus
|
||||
unit:
|
||||
description:
|
||||
- Units in the IoT Hub.
|
||||
type: int
|
||||
returned: always
|
||||
sample: 1
|
||||
sku:
|
||||
description:
|
||||
- Pricing tier for Azure IoT Hub.
|
||||
type: str
|
||||
returned: always
|
||||
sample: f1
|
||||
cloud_to_device:
|
||||
description:
|
||||
- Cloud to device message properties.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
max_delivery_count:
|
||||
description:
|
||||
- The number of times the IoT hub attempts to deliver a message on the feedback queue.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)."
|
||||
type: int
|
||||
returned: always
|
||||
sample: 10
|
||||
ttl_as_iso8601:
|
||||
description:
|
||||
- The period of time for which a message is available to consume before it is expired by the IoT hub.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)."
|
||||
type: str
|
||||
returned: always
|
||||
sample: "1:00:00"
|
||||
enable_file_upload_notifications:
|
||||
description:
|
||||
- Whether file upload notifications are enabled.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: True
|
||||
event_endpoints:
|
||||
description:
|
||||
- Built-in endpoint where device messages are delivered.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
endpoint:
|
||||
description:
|
||||
- The Event Hub-compatible endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "sb://iothub-ns-testing-1478811-9bbc4a15f0.servicebus.windows.net/"
|
||||
partition_count:
|
||||
description:
|
||||
- The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
|
||||
type: int
|
||||
returned: always
|
||||
sample: 2
|
||||
retention_time_in_days:
|
||||
description:
|
||||
- The retention time for device-to-cloud messages in days.
|
||||
- "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
|
||||
type: int
|
||||
returned: always
|
||||
sample: 1
|
||||
partition_ids:
|
||||
description:
|
||||
- List of partition IDs for the event endpoint.
|
||||
type: list
|
||||
returned: always
|
||||
sample: ["0", "1"]
|
||||
host_name:
|
||||
description:
|
||||
- Host of the IoT hub.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "testing.azure-devices.net"
|
||||
ip_filters:
|
||||
description:
|
||||
- Configure rules for rejecting or accepting traffic from specific IPv4 addresses.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the filter.
|
||||
type: str
|
||||
returned: always
|
||||
sample: filter
|
||||
ip_mask:
|
||||
description:
|
||||
- A string that contains the IP address range in CIDR notation for the rule.
|
||||
type: str
|
||||
returned: always
|
||||
sample: 40.54.7.3
|
||||
action:
|
||||
description:
|
||||
- The desired action for requests captured by this rule.
|
||||
type: str
|
||||
returned: always
|
||||
sample: Reject
|
||||
routing_endpoints:
|
||||
description:
|
||||
- Custom endpoints.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
event_hubs:
|
||||
description:
|
||||
- List of custom endpoints of event hubs.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: foo
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: bar
|
||||
subscription:
|
||||
description:
|
||||
- Subscription ID of the endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
|
||||
service_bus_queues:
|
||||
description:
|
||||
- List of custom endpoints of service bus queue.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: foo
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: bar
|
||||
subscription:
|
||||
description:
|
||||
- Subscription ID of the endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
|
||||
service_bus_topics:
|
||||
description:
|
||||
- List of custom endpoints of service bus topic.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: foo
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: bar
|
||||
subscription:
|
||||
description:
|
||||
- Subscription ID of the endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
|
||||
storage_containers:
|
||||
description:
|
||||
- List of custom endpoints of storage.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the custom endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: foo
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group of the endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: bar
|
||||
subscription:
|
||||
description:
|
||||
- Subscription ID of the endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
connection_string:
|
||||
description:
|
||||
- Connection string of the custom endpoint.
|
||||
type: str
|
||||
returned: always
|
||||
sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
|
||||
routes:
|
||||
description:
|
||||
- Route device-to-cloud messages to service-facing endpoints.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the route.
|
||||
type: str
|
||||
returned: always
|
||||
sample: route1
|
||||
source:
|
||||
description:
|
||||
- The origin of the data stream to be acted upon.
|
||||
type: str
|
||||
returned: always
|
||||
sample: device_messages
|
||||
enabled:
|
||||
description:
|
||||
- Whether to enable the route.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
endpoint_name:
|
||||
description:
|
||||
- The name of the endpoint in I(routing_endpoints) where IoT Hub sends messages that match the query.
|
||||
type: str
|
||||
returned: always
|
||||
sample: foo
|
||||
condition:
|
||||
description:
|
||||
- "The query expression for the routing query that is run against the message application properties,
|
||||
system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint."
|
||||
- "For more information about constructing a query,
|
||||
see U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)"
|
||||
type: str
|
||||
returned: always
|
||||
sample: "true"
|
||||
tags:
|
||||
description:
|
||||
- Tags assigned to the IoT hub.
|
||||
type: dict
|
||||
returned: always
|
||||
sample: { 'key1': 'value1' }
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.common.dict_transformations import _camel_to_snake
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from azure.common import AzureHttpError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMIoTHubFacts(AzureRMModuleBase):
|
||||
"""Utility class to get IoT Hub facts"""
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_args = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
tags=dict(type='list'),
|
||||
show_stats=dict(type='bool'),
|
||||
show_quota_metrics=dict(type='bool'),
|
||||
show_endpoint_health=dict(type='bool'),
|
||||
list_keys=dict(type='bool'),
|
||||
test_route_message=dict(type='str'),
|
||||
list_consumer_groups=dict(type='bool')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
azure_iothubs=[]
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.tags = None
|
||||
self.show_stats = None
|
||||
self.show_quota_metrics = None
|
||||
self.show_endpoint_health = None
|
||||
self.list_keys = None
|
||||
self.test_route_message = None
|
||||
self.list_consumer_groups = None
|
||||
|
||||
super(AzureRMIoTHubFacts, self).__init__(
|
||||
derived_arg_spec=self.module_args,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_args:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
response = []
|
||||
if self.name:
|
||||
response = self.get_item()
|
||||
elif self.resource_group:
|
||||
response = self.list_by_resource_group()
|
||||
else:
|
||||
response = self.list_all()
|
||||
self.results['azure_iothubs'] = [self.to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
|
||||
return self.results
|
||||
|
||||
def get_item(self):
|
||||
"""Get a single IoT Hub"""
|
||||
|
||||
self.log('Get properties for {0}'.format(self.name))
|
||||
|
||||
item = None
|
||||
|
||||
try:
|
||||
item = self.IoThub_client.iot_hub_resource.get(self.resource_group, self.name)
|
||||
return [item]
|
||||
except Exception as exc:
|
||||
self.fail('Error when getting IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def list_all(self):
|
||||
"""Get all IoT Hubs"""
|
||||
|
||||
self.log('List all IoT Hubs')
|
||||
|
||||
try:
|
||||
return self.IoThub_client.iot_hub_resource.list_by_subscription()
|
||||
except Exception as exc:
|
||||
self.fail('Failed to list all IoT Hubs - {0}'.format(str(exc)))
|
||||
|
||||
def list_by_resource_group(self):
|
||||
try:
|
||||
return self.IoThub_client.iot_hub_resource.list(self.resource_group)
|
||||
except Exception as exc:
|
||||
self.fail('Failed to list IoT Hub in resource group {0} - {1}'.format(self.resource_group, exc.message or str(exc)))
|
||||
|
||||
def show_hub_stats(self, resource_group, name):
|
||||
try:
|
||||
return self.IoThub_client.iot_hub_resource.get_stats(resource_group, name).as_dict()
|
||||
except Exception as exc:
|
||||
self.fail('Failed to get statistics for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
|
||||
|
||||
def show_hub_quota_metrics(self, resource_group, name):
|
||||
result = []
|
||||
try:
|
||||
resp = self.IoThub_client.iot_hub_resource.get_quota_metrics(resource_group, name)
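# The SDK returns a paged iterator; drain it with next() until StopIteration is raised.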
|
||||
while True:
|
||||
result.append(resp.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except Exception as exc:
|
||||
self.fail('Failed to get quota metrics for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
|
||||
return result
|
||||
|
||||
def show_hub_endpoint_health(self, resource_group, name):
|
||||
result = []
|
||||
try:
|
||||
resp = self.IoThub_client.iot_hub_resource.get_endpoint_health(resource_group, name)
|
||||
while True:
|
||||
result.append(resp.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except Exception as exc:
|
||||
self.fail('Failed to get health for IoT Hub {0}/{1} routing endpoints: {2}'.format(resource_group, name, str(exc)))
|
||||
return result
|
||||
|
||||
def test_all_routes(self, resource_group, name):
|
||||
try:
|
||||
return self.IoThub_client.iot_hub_resource.test_all_routes(self.test_route_message, resource_group, name).routes.as_dict()
|
||||
except Exception as exc:
|
||||
self.fail('Failed to test routes for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
|
||||
|
||||
def list_hub_keys(self, resource_group, name):
|
||||
result = []
|
||||
try:
|
||||
resp = self.IoThub_client.iot_hub_resource.list_keys(resource_group, name)
|
||||
while True:
|
||||
result.append(resp.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except Exception as exc:
|
||||
self.fail('Failed to list keys for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
|
||||
return result
|
||||
|
||||
def list_event_hub_consumer_groups(self, resource_group, name, event_hub_endpoint='events'):
|
||||
result = []
|
||||
try:
|
||||
resp = self.IoThub_client.iot_hub_resource.list_event_hub_consumer_groups(resource_group, name, event_hub_endpoint)
|
||||
while True:
|
||||
cg = resp.next()
|
||||
result.append(dict(
|
||||
id=cg.id,
|
||||
name=cg.name
|
||||
))
|
||||
except StopIteration:
|
||||
pass
|
||||
except Exception as exc:
|
||||
self.fail('Failed to list consumer groups for IoT Hub {0}/{1} event hub endpoint: {2}'.format(resource_group, name, str(exc)))
|
||||
return result
|
||||
|
||||
def route_to_dict(self, route):
|
||||
return dict(
|
||||
name=route.name,
|
||||
source=_camel_to_snake(route.source),
|
||||
endpoint_name=route.endpoint_names[0],
|
||||
enabled=route.is_enabled,
|
||||
condition=route.condition
|
||||
)
|
||||
|
||||
def instance_dict_to_dict(self, instance_dict):
|
||||
result = dict()
|
||||
for key in instance_dict.keys():
|
||||
result[key] = instance_dict[key].as_dict()
|
||||
return result
|
||||
|
||||
def to_dict(self, hub):
|
||||
result = dict()
|
||||
properties = hub.properties
|
||||
result['id'] = hub.id
|
||||
result['name'] = hub.name
|
||||
result['resource_group'] = parse_resource_id(hub.id).get('resource_group')
|
||||
result['location'] = hub.location
|
||||
result['tags'] = hub.tags
|
||||
result['unit'] = hub.sku.capacity
|
||||
result['sku'] = hub.sku.name.lower()
|
||||
result['cloud_to_device'] = dict(
|
||||
max_delivery_count=properties.cloud_to_device.feedback.max_delivery_count,
|
||||
ttl_as_iso8601=str(properties.cloud_to_device.feedback.ttl_as_iso8601)
|
||||
)
|
||||
result['enable_file_upload_notifications'] = properties.enable_file_upload_notifications
|
||||
result['event_hub_endpoints'] = self.instance_dict_to_dict(properties.event_hub_endpoints)
|
||||
result['host_name'] = properties.host_name
|
||||
result['ip_filters'] = [x.as_dict() for x in properties.ip_filter_rules]
|
||||
result['routing_endpoints'] = properties.routing.endpoints.as_dict()
|
||||
result['routes'] = [self.route_to_dict(x) for x in properties.routing.routes]
|
||||
result['fallback_route'] = self.route_to_dict(properties.routing.fallback_route)
|
||||
result['status'] = properties.state
|
||||
result['storage_endpoints'] = self.instance_dict_to_dict(properties.storage_endpoints)
|
||||
|
||||
# network overhead part
|
||||
if self.show_stats:
|
||||
result['statistics'] = self.show_hub_stats(result['resource_group'], hub.name)
|
||||
if self.show_quota_metrics:
|
||||
result['quota_metrics'] = self.show_hub_quota_metrics(result['resource_group'], hub.name)
|
||||
if self.show_endpoint_health:
|
||||
result['endpoint_health'] = self.show_hub_endpoint_health(result['resource_group'], hub.name)
|
||||
if self.list_keys:
|
||||
result['keys'] = self.list_hub_keys(result['resource_group'], hub.name)
|
||||
if self.test_route_message:
|
||||
result['test_route_result'] = self.test_all_routes(result['resource_group'], hub.name)
|
||||
if self.list_consumer_groups:
|
||||
result['consumer_groups'] = self.list_event_hub_consumer_groups(result['resource_group'], hub.name)
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
"""Main module execution code path"""
|
||||
|
||||
AzureRMIoTHubFacts()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,169 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_iothubconsumergroup
|
||||
version_added: "2.9"
|
||||
short_description: Manage Azure IoT hub consumer group
|
||||
description:
|
||||
- Create or delete a consumer group of an Azure IoT hub.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
type: str
|
||||
required: true
|
||||
hub:
|
||||
description:
|
||||
- Name of the IoT hub.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- State of the consumer group. Use C(present) to create or update a consumer group and C(absent) to delete it.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
event_hub:
|
||||
description:
|
||||
- Event hub endpoint name.
|
||||
type: str
|
||||
default: events
|
||||
name:
|
||||
description:
|
||||
- Name of the consumer group.
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create an IoT hub consumer group
|
||||
azure_rm_iothubconsumergroup:
|
||||
name: test
|
||||
resource_group: myResourceGroup
|
||||
hub: Testing
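# Illustrative sketch (not from the original examples): removing the same consumer group;
# event_hub falls back to the default 'events' endpoint.
- name: Delete an IoT hub consumer group
  azure_rm_iothubconsumergroup:
    name: test
    resource_group: myResourceGroup
    hub: Testing
    state: absent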
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID of the consumer group.
|
||||
returned: success
|
||||
type: str
|
||||
sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup
|
||||
/providers/Microsoft.Devices/IotHubs/Testing/events/ConsumerGroups/%24Default"
|
||||
name:
|
||||
description:
|
||||
- Name of the consumer group.
|
||||
sample: Testing
|
||||
returned: success
|
||||
type: str
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
|
||||
import re
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMIoTHubConsumerGroup(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
hub=dict(type='str', required=True),
|
||||
event_hub=dict(type='str', default='events')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.hub = None
|
||||
self.event_hub = None
|
||||
|
||||
super(AzureRMIoTHubConsumerGroup, self).__init__(self.module_arg_spec, supports_check_mode=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec.keys():
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
changed = False
|
||||
cg = self.get_cg()
|
||||
if not cg and self.state == 'present':
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
cg = self.create_cg()
|
||||
elif cg and self.state == 'absent':
|
||||
changed = True
|
||||
cg = None
|
||||
if not self.check_mode:
|
||||
self.delete_cg()
|
||||
self.results = dict(
|
||||
id=cg.id,
|
||||
name=cg.name
|
||||
) if cg else dict()
|
||||
self.results['changed'] = changed
|
||||
return self.results
|
||||
|
||||
def get_cg(self):
|
||||
try:
|
||||
return self.IoThub_client.iot_hub_resource.get_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
def create_cg(self):
|
||||
try:
|
||||
return self.IoThub_client.iot_hub_resource.create_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
|
||||
except Exception as exc:
|
||||
self.fail('Error when creating the consumer group {0} for IoT Hub {1} event hub {2}: {3}'.format(self.name, self.hub, self.event_hub, str(exc)))
|
||||
|
||||
def delete_cg(self):
|
||||
try:
|
||||
return self.IoThub_client.iot_hub_resource.delete_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
|
||||
except Exception as exc:
|
||||
self.fail('Error when deleting the consumer group {0} for IoT Hub {1} event hub {2}: {3}'.format(self.name, self.hub, self.event_hub, str(exc)))
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMIoTHubConsumerGroup()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,504 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_keyvault
|
||||
version_added: "2.5"
|
||||
short_description: Manage Key Vault instance
|
||||
description:
|
||||
- Create, update and delete instance of Key Vault.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group to which the key vault belongs.
|
||||
required: True
|
||||
vault_name:
|
||||
description:
|
||||
- Name of the vault.
|
||||
required: True
|
||||
location:
|
||||
description:
|
||||
- Resource location. If not set, location from the resource group will be used as default.
|
||||
vault_tenant:
|
||||
description:
|
||||
- The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
|
||||
sku:
|
||||
description:
|
||||
- SKU details.
|
||||
suboptions:
|
||||
family:
|
||||
description:
|
||||
- SKU family name.
|
||||
name:
|
||||
description:
|
||||
- SKU name to specify whether the key vault is a standard vault or a premium vault.
|
||||
required: True
|
||||
choices:
|
||||
- 'standard'
|
||||
- 'premium'
|
||||
access_policies:
|
||||
description:
|
||||
- An array of 0 to 16 identities that have access to the key vault.
|
||||
- All identities in the array must use the same tenant ID as the key vault's tenant ID.
|
||||
suboptions:
|
||||
tenant_id:
|
||||
description:
|
||||
- The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
|
||||
- Current keyvault C(tenant_id) value will be used if not specified.
|
||||
object_id:
|
||||
description:
|
||||
- The object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault.
|
||||
- The object ID must be unique for the list of access policies.
|
||||
- Please note this is not the application ID. The object ID can be obtained by running C(az ad sp show --id <application id>).
|
||||
required: True
|
||||
application_id:
|
||||
description:
|
||||
- Application ID of the client making request on behalf of a principal.
|
||||
keys:
|
||||
description:
|
||||
- List of permissions to keys.
|
||||
choices:
|
||||
- 'encrypt'
|
||||
- 'decrypt'
|
||||
- 'wrapkey'
|
||||
- 'unwrapkey'
|
||||
- 'sign'
|
||||
- 'verify'
|
||||
- 'get'
|
||||
- 'list'
|
||||
- 'create'
|
||||
- 'update'
|
||||
- 'import'
|
||||
- 'delete'
|
||||
- 'backup'
|
||||
- 'restore'
|
||||
- 'recover'
|
||||
- 'purge'
|
||||
secrets:
|
||||
description:
|
||||
- List of permissions to secrets.
|
||||
choices:
|
||||
- 'get'
|
||||
- 'list'
|
||||
- 'set'
|
||||
- 'delete'
|
||||
- 'backup'
|
||||
- 'restore'
|
||||
- 'recover'
|
||||
- 'purge'
|
||||
certificates:
|
||||
description:
|
||||
- List of permissions to certificates.
|
||||
choices:
|
||||
- 'get'
|
||||
- 'list'
|
||||
- 'delete'
|
||||
- 'create'
|
||||
- 'import'
|
||||
- 'update'
|
||||
- 'managecontacts'
|
||||
- 'getissuers'
|
||||
- 'listissuers'
|
||||
- 'setissuers'
|
||||
- 'deleteissuers'
|
||||
- 'manageissuers'
|
||||
- 'recover'
|
||||
- 'purge'
|
||||
storage:
|
||||
description:
|
||||
- List of permissions to storage accounts.
|
||||
enabled_for_deployment:
|
||||
description:
|
||||
- Property to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault.
|
||||
type: bool
|
||||
enabled_for_disk_encryption:
|
||||
description:
|
||||
- Property to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys.
|
||||
type: bool
|
||||
enabled_for_template_deployment:
|
||||
description:
|
||||
- Property to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault.
|
||||
type: bool
|
||||
enable_soft_delete:
|
||||
description:
|
||||
- Property to specify whether the soft delete functionality is enabled for this key vault.
|
||||
type: bool
|
||||
recover_mode:
|
||||
description:
|
||||
- Create vault in recovery mode.
|
||||
type: bool
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the Key Vault. Use C(present) to create or update a Key Vault and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create instance of Key Vault
|
||||
azure_rm_keyvault:
|
||||
resource_group: myResourceGroup
|
||||
vault_name: samplekeyvault
|
||||
enabled_for_deployment: yes
|
||||
vault_tenant: 72f98888-8666-4144-9199-2d7cd0111111
|
||||
sku:
|
||||
name: standard
|
||||
access_policies:
|
||||
- tenant_id: 72f98888-8666-4144-9199-2d7cd0111111
|
||||
object_id: 99998888-8666-4144-9199-2d7cd0111111
|
||||
keys:
|
||||
- get
|
||||
- list
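# Illustrative sketch (not from the original examples): removing the vault created above.
- name: Delete instance of Key Vault
  azure_rm_keyvault:
    resource_group: myResourceGroup
    vault_name: samplekeyvault
    state: absent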
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The Azure Resource Manager resource ID for the key vault.
|
||||
returned: always
|
||||
type: str
|
||||
sample: id
|
||||
'''
|
||||
|
||||
import collections
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.keyvault import KeyVaultManagementClient
|
||||
from msrest.polling import LROPoller
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMVaults(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Key Vault resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
vault_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
vault_tenant=dict(
|
||||
type='str'
|
||||
),
|
||||
sku=dict(
|
||||
type='dict'
|
||||
),
|
||||
access_policies=dict(
|
||||
type='list',
|
||||
elements='dict',
|
||||
options=dict(
|
||||
tenant_id=dict(type='str'),
|
||||
object_id=dict(type='str', required=True),
|
||||
application_id=dict(type='str'),
|
||||
# FUTURE: add `choices` support once choices supports lists of values
|
||||
keys=dict(type='list'),
|
||||
secrets=dict(type='list'),
|
||||
certificates=dict(type='list'),
|
||||
storage=dict(type='list')
|
||||
)
|
||||
),
|
||||
enabled_for_deployment=dict(
|
||||
type='bool'
|
||||
),
|
||||
enabled_for_disk_encryption=dict(
|
||||
type='bool'
|
||||
),
|
||||
enabled_for_template_deployment=dict(
|
||||
type='bool'
|
||||
),
|
||||
enable_soft_delete=dict(
|
||||
type='bool'
|
||||
),
|
||||
recover_mode=dict(
|
||||
type='bool'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.module_required_if = [['state', 'present', ['vault_tenant']]]
|
||||
|
||||
self.resource_group = None
|
||||
self.vault_name = None
|
||||
self.parameters = dict()
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMVaults, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True,
|
||||
required_if=self.module_required_if)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
# translate Ansible input to SDK-formatted dict in self.parameters
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
if key == "location":
|
||||
self.parameters["location"] = kwargs[key]
|
||||
elif key == "vault_tenant":
|
||||
self.parameters.setdefault("properties", {})["tenant_id"] = kwargs[key]
|
||||
elif key == "sku":
|
||||
self.parameters.setdefault("properties", {})["sku"] = kwargs[key]
|
||||
elif key == "access_policies":
|
||||
access_policies = kwargs[key]
|
||||
for policy in access_policies:
|
||||
if 'keys' in policy:
|
||||
policy.setdefault("permissions", {})["keys"] = policy["keys"]
|
||||
policy.pop("keys", None)
|
||||
if 'secrets' in policy:
|
||||
policy.setdefault("permissions", {})["secrets"] = policy["secrets"]
|
||||
policy.pop("secrets", None)
|
||||
if 'certificates' in policy:
|
||||
policy.setdefault("permissions", {})["certificates"] = policy["certificates"]
|
||||
policy.pop("certificates", None)
|
||||
if 'storage' in policy:
|
||||
policy.setdefault("permissions", {})["storage"] = policy["storage"]
|
||||
policy.pop("storage", None)
|
||||
if policy.get('tenant_id') is None:
|
||||
# default to key vault's tenant, since that's all that's currently supported anyway
|
||||
policy['tenant_id'] = kwargs['vault_tenant']
|
||||
self.parameters.setdefault("properties", {})["access_policies"] = access_policies
|
||||
elif key == "enabled_for_deployment":
|
||||
self.parameters.setdefault("properties", {})["enabled_for_deployment"] = kwargs[key]
|
||||
elif key == "enabled_for_disk_encryption":
|
||||
self.parameters.setdefault("properties", {})["enabled_for_disk_encryption"] = kwargs[key]
|
||||
elif key == "enabled_for_template_deployment":
|
||||
self.parameters.setdefault("properties", {})["enabled_for_template_deployment"] = kwargs[key]
|
||||
elif key == "enable_soft_delete":
|
||||
self.parameters.setdefault("properties", {})["enable_soft_delete"] = kwargs[key]
|
||||
elif key == "recover_mode":
|
||||
self.parameters.setdefault("properties", {})["create_mode"] = 'recover' if kwargs[key] else 'default'
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(KeyVaultManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version="2018-02-14")
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if "location" not in self.parameters:
|
||||
self.parameters["location"] = resource_group.location
|
||||
|
||||
old_response = self.get_keyvault()
|
||||
|
||||
if not old_response:
|
||||
self.log("Key Vault instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Key Vault instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.log("Need to check if Key Vault instance has to be deleted or may be updated")
|
||||
if ('location' in self.parameters) and (self.parameters['location'] != old_response['location']):
|
||||
self.to_do = Actions.Update
|
||||
elif ('tenant_id' in self.parameters) and (self.parameters['tenant_id'] != old_response['tenant_id']):
|
||||
self.to_do = Actions.Update
|
||||
elif ('enabled_for_deployment' in self.parameters) and (self.parameters['enabled_for_deployment'] != old_response['enabled_for_deployment']):
|
||||
self.to_do = Actions.Update
|
||||
elif (('enabled_for_disk_encryption' in self.parameters) and
|
||||
(self.parameters['enabled_for_disk_encryption'] != old_response['enabled_for_disk_encryption'])):
|
||||
self.to_do = Actions.Update
|
||||
elif (('enabled_for_template_deployment' in self.parameters) and
|
||||
(self.parameters['enabled_for_template_deployment'] != old_response['enabled_for_template_deployment'])):
|
||||
self.to_do = Actions.Update
|
||||
elif ('enable_soft_delete' in self.parameters) and (self.parameters['enable_soft_delete'] != old_response['enable_soft_delete']):
|
||||
self.to_do = Actions.Update
|
||||
elif ('create_mode' in self.parameters) and (self.parameters['create_mode'] != old_response['create_mode']):
|
||||
self.to_do = Actions.Update
|
||||
elif 'access_policies' in self.parameters['properties']:
|
||||
if len(self.parameters['properties']['access_policies']) != len(old_response['properties']['access_policies']):
|
||||
self.to_do = Actions.Update
|
||||
else:
|
||||
# FUTURE: this list isn't really order-dependent; we should be set-ifying the rules list for an order-independent comparison
|
||||
for i in range(len(old_response['properties']['access_policies'])):
|
||||
n = self.parameters['properties']['access_policies'][i]
|
||||
o = old_response['properties']['access_policies'][i]
|
||||
if n.get('tenant_id', False) != o.get('tenant_id', False):
|
||||
self.to_do = Actions.Update
|
||||
break
|
||||
if n.get('object_id', None) != o.get('object_id', None):
|
||||
self.to_do = Actions.Update
|
||||
break
|
||||
if n.get('application_id', None) != o.get('application_id', None):
|
||||
self.to_do = Actions.Update
|
||||
break
|
||||
if sorted(n.get('keys', [])) != sorted(o.get('keys', [])):
|
||||
self.to_do = Actions.Update
|
||||
break
|
||||
if sorted(n.get('secrets', [])) != sorted(o.get('secrets', [])):
|
||||
self.to_do = Actions.Update
|
||||
break
|
||||
if sorted(n.get('certificates', [])) != sorted(o.get('certificates', [])):
|
||||
self.to_do = Actions.Update
|
||||
break
|
||||
if sorted(n.get('storage', [])) != sorted(o.get('storage', [])):
|
||||
self.to_do = Actions.Update
|
||||
break
|
||||
|
||||
update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
|
||||
|
||||
if update_tags:
|
||||
self.to_do = Actions.Update
|
||||
self.tags = newtags
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Key Vault instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
self.parameters["tags"] = self.tags
|
||||
|
||||
response = self.create_update_keyvault()
|
||||
|
||||
if not old_response:
|
||||
self.results['changed'] = True
|
||||
else:
|
||||
self.results['changed'] = old_response != response
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Key Vault instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_keyvault()
|
||||
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
|
||||
# for some time after deletion -- this should be really fixed in Azure
|
||||
while self.get_keyvault():
|
||||
time.sleep(20)
|
||||
else:
|
||||
self.log("Key Vault instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_keyvault(self):
|
||||
'''
|
||||
Creates or updates Key Vault with the specified configuration.
|
||||
|
||||
:return: deserialized Key Vault instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the Key Vault instance {0}".format(self.vault_name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.vaults.create_or_update(resource_group_name=self.resource_group,
|
||||
vault_name=self.vault_name,
|
||||
parameters=self.parameters)
|
||||
if isinstance(response, LROPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Key Vault instance.')
|
||||
self.fail("Error creating the Key Vault instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_keyvault(self):
|
||||
'''
|
||||
Deletes specified Key Vault instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the Key Vault instance {0}".format(self.vault_name))
|
||||
try:
|
||||
response = self.mgmt_client.vaults.delete(resource_group_name=self.resource_group,
|
||||
vault_name=self.vault_name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Key Vault instance.')
|
||||
self.fail("Error deleting the Key Vault instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_keyvault(self):
|
||||
'''
|
||||
Gets the properties of the specified Key Vault.
|
||||
|
||||
:return: deserialized Key Vault instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the Key Vault instance {0} is present".format(self.vault_name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.vaults.get(resource_group_name=self.resource_group,
|
||||
vault_name=self.vault_name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Key Vault instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Key Vault instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMVaults()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,323 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_keyvault_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Key Vault facts
|
||||
description:
|
||||
- Get facts of Azure Key Vault.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group to which the key vault belongs.
|
||||
name:
|
||||
description:
|
||||
- The name of the key vault.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yunge Zhu (@yungezz)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get Key Vault by name
|
||||
azure_rm_keyvault_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myVault
|
||||
|
||||
- name: List Key Vaults in specific resource group
|
||||
azure_rm_keyvault_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: List Key Vaults in current subscription
|
||||
azure_rm_keyvault_info:
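# Illustrative sketch (not part of the original examples): the documented tags
# option filters results; the tag value shown here is hypothetical.
- name: List Key Vaults with a specific tag
  azure_rm_keyvault_info:
    tags:
      - environment:testing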
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
keyvaults:
|
||||
description: List of Azure Key Vaults.
|
||||
returned: always
|
||||
type: list
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- Name of the vault.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myVault
|
||||
id:
|
||||
description:
|
||||
- Resource Id of the vault.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/myVault
|
||||
vault_uri:
|
||||
description:
|
||||
- Vault uri.
|
||||
returned: always
|
||||
type: str
|
||||
sample: https://myVault.vault.azure.net/
|
||||
location:
|
||||
description:
|
||||
- Location of the vault.
|
||||
returned: always
|
||||
type: str
|
||||
sample: eastus
|
||||
enabled_for_deployment:
|
||||
description:
|
||||
- Whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: False
|
||||
enabled_for_disk_encryption:
|
||||
description:
|
||||
- Whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: False
|
||||
enabled_for_template_deployment:
|
||||
description:
|
||||
- Whether Azure Resource Manager is permitted to retrieve secrets from the key vault.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: False
|
||||
tags:
|
||||
description:
|
||||
- List of tags.
|
||||
type: list
|
||||
sample:
|
||||
- foo
|
||||
sku:
|
||||
description:
|
||||
- Sku of the vault.
|
||||
returned: always
|
||||
type: dict
|
||||
contains:
|
||||
family:
|
||||
description: Sku family name.
|
||||
type: str
|
||||
returned: always
|
||||
sample: A
|
||||
name:
|
||||
description: Sku name.
|
||||
type: str
|
||||
returned: always
|
||||
sample: standard
|
||||
access_policies:
|
||||
description:
|
||||
- Access policies of the vault.
|
||||
returned: always
|
||||
type: list
|
||||
contains:
|
||||
object_id:
|
||||
description: The object ID of a user, service principal or security group in AAD for the vault.
|
||||
type: str
|
||||
returned: always
|
||||
sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
tenant_id:
|
||||
description: The AAD tenant ID that should be used for authenticating requests to the key vault.
|
||||
type: str
|
||||
returned: always
|
||||
sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
permissions:
|
||||
description: Permissions the identity has for keys, secrets and certificates.
|
||||
type: complex
|
||||
returned: always
|
||||
contains:
|
||||
keys:
|
||||
description:
|
||||
Permissions to keys.
|
||||
type: list
|
||||
returned: always
|
||||
sample:
|
||||
- get
|
||||
- create
|
||||
secrets:
|
||||
description:
|
||||
Permissions to secrets.
|
||||
type: list
|
||||
returned: always
|
||||
sample:
|
||||
- list
|
||||
- set
|
||||
certificates:
|
||||
description:
|
||||
Permissions to certificates.
|
||||
type: list
|
||||
returned: always
|
||||
sample:
|
||||
- get
|
||||
- import
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.mgmt.keyvault import KeyVaultManagementClient
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def keyvault_to_dict(vault):
|
||||
return dict(
|
||||
id=vault.id,
|
||||
name=vault.name,
|
||||
location=vault.location,
|
||||
tags=vault.tags,
|
||||
vault_uri=vault.properties.vault_uri,
|
||||
enabled_for_deployment=vault.properties.enabled_for_deployment,
|
||||
enabled_for_disk_encryption=vault.properties.enabled_for_disk_encryption,
|
||||
enabled_for_template_deployment=vault.properties.enabled_for_template_deployment,
|
||||
access_policies=[dict(
|
||||
tenant_id=policy.tenant_id,
|
||||
object_id=policy.object_id,
|
||||
permissions=dict(
|
||||
keys=[kp.lower() for kp in policy.permissions.keys] if policy.permissions.keys else None,
|
||||
secrets=[sp.lower() for sp in policy.permissions.secrets] if policy.permissions.secrets else None,
|
||||
certificates=[cp.lower() for cp in policy.permissions.certificates] if policy.permissions.certificates else None
|
||||
) if policy.permissions else None,
|
||||
) for policy in vault.properties.access_policies] if vault.properties.access_policies else None,
|
||||
sku=dict(
|
||||
family=vault.properties.sku.family,
|
||||
name=vault.properties.sku.name.name
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class AzureRMKeyVaultInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str'),
|
||||
name=dict(type='str'),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self._client = None
|
||||
|
||||
super(AzureRMKeyVaultInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=False,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self._client = self.get_mgmt_svc_client(KeyVaultManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager,
|
||||
api_version="2018-02-14")
|
||||
|
||||
if self.name:
|
||||
if self.resource_group:
|
||||
self.results['keyvaults'] = self.get_by_name()
|
||||
else:
|
||||
self.fail("resource_group is required when filtering by name")
|
||||
elif self.resource_group:
|
||||
self.results['keyvaults'] = self.list_by_resource_group()
|
||||
else:
|
||||
self.results['keyvaults'] = self.list()
|
||||
|
||||
return self.results
|
||||
|
||||
def get_by_name(self):
|
||||
'''
|
||||
Gets the properties of the specified key vault.
|
||||
|
||||
:return: deserialized key vault state dictionary
|
||||
'''
|
||||
self.log("Get the key vault {0}".format(self.name))
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = self._client.vaults.get(resource_group_name=self.resource_group,
|
||||
vault_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(keyvault_to_dict(response))
|
||||
except CloudError as e:
|
||||
self.log("Did not find the key vault {0}: {1}".format(self.name, str(e)))
|
||||
return results
|
||||
|
||||
def list_by_resource_group(self):
|
||||
'''
|
||||
Lists the properties of key vaults in specific resource group.
|
||||
|
||||
:return: deserialized key vaults state dictionary
|
||||
'''
|
||||
self.log("Get the key vaults in resource group {0}".format(self.resource_group))
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = list(self._client.vaults.list_by_resource_group(resource_group_name=self.resource_group))
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(keyvault_to_dict(item))
|
||||
except CloudError as e:
|
||||
self.log("Did not find key vaults in resource group {0} : {1}.".format(self.resource_group, str(e)))
|
||||
return results
|
||||
|
||||
def list(self):
|
||||
'''
|
||||
Lists the properties of key vaults in specific subscription.
|
||||
|
||||
:return: deserialized key vaults state dictionary
|
||||
'''
|
||||
self.log("Get the key vaults in current subscription")
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = list(self._client.vaults.list())
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(keyvault_to_dict(item))
|
||||
except CloudError as e:
|
||||
self.log("Did not find key vault in current subscription {0}.".format(str(e)))
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMKeyVaultInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,310 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_keyvaultkey
|
||||
version_added: 2.5
|
||||
short_description: Use Azure KeyVault keys
|
||||
description:
|
||||
- Create or delete a key within a given keyvault.
|
||||
- By using Key Vault, you can encrypt keys and secrets.
|
||||
- These include authentication keys, storage account keys, data encryption keys, .PFX files, and passwords.
|
||||
options:
|
||||
keyvault_uri:
|
||||
description:
|
||||
- URI of the keyvault endpoint.
|
||||
required: true
|
||||
key_name:
|
||||
description:
|
||||
- Name of the keyvault key.
|
||||
required: true
|
||||
byok_file:
|
||||
description:
|
||||
- BYOK file.
|
||||
pem_file:
|
||||
description:
|
||||
- PEM file.
|
||||
pem_password:
|
||||
description:
|
||||
- PEM password.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the key. Use C(present) to create a key and C(absent) to delete a key.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Ian Philpot (@iphilpot)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a key
|
||||
azure_rm_keyvaultkey:
|
||||
key_name: MyKey
|
||||
keyvault_uri: https://contoso.vault.azure.net/
|
||||
|
||||
- name: Delete a key
|
||||
azure_rm_keyvaultkey:
|
||||
key_name: MyKey
|
||||
keyvault_uri: https://contoso.vault.azure.net/
|
||||
state: absent
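# Illustrative sketch (not part of the original examples): keys can carry tags
# through the module's standard azure_tags support; the tag shown is hypothetical.
- name: Create a key with tags
  azure_rm_keyvaultkey:
    key_name: MyKey
    keyvault_uri: https://contoso.vault.azure.net/
    tags:
      purpose: testing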
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
state:
|
||||
description:
|
||||
- Current state of the key.
|
||||
returned: success
|
||||
type: complex
|
||||
contains:
|
||||
key_id:
|
||||
description:
|
||||
- key resource path.
|
||||
type: str
|
||||
sample: https://contoso.vault.azure.net/keys/hello/e924f053839f4431b35bc54393f98423
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
import re
|
||||
import codecs
|
||||
from azure.keyvault import KeyVaultClient, KeyVaultId, KeyVaultAuthentication
|
||||
from azure.keyvault.models import KeyAttributes, JsonWebKey
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
from azure.keyvault.models.key_vault_error import KeyVaultErrorException
|
||||
from msrestazure.azure_active_directory import MSIAuthentication
|
||||
from OpenSSL import crypto
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMKeyVaultKey(AzureRMModuleBase):
|
||||
''' Module that creates or deletes keys in Azure KeyVault '''
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
key_name=dict(type='str', required=True),
|
||||
keyvault_uri=dict(type='str', required=True),
|
||||
pem_file=dict(type='str'),
|
||||
pem_password=dict(type='str'),
|
||||
byok_file=dict(type='str'),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent'])
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
state=dict()
|
||||
)
|
||||
|
||||
self.key_name = None
|
||||
self.keyvault_uri = None
|
||||
self.pem_file = None
|
||||
self.pem_password = None
|
||||
self.state = None
|
||||
self.client = None
|
||||
self.tags = None
|
||||
|
||||
required_if = [
|
||||
('pem_password', 'present', ['pem_file'])
|
||||
]
|
||||
|
||||
super(AzureRMKeyVaultKey, self).__init__(self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
# Create KeyVaultClient
|
||||
self.client = self.get_keyvault_client()
|
||||
|
||||
results = dict()
|
||||
changed = False
|
||||
|
||||
try:
|
||||
results['key_id'] = self.get_key(self.key_name)
|
||||
|
||||
# Key exists and will be deleted
|
||||
if self.state == 'absent':
|
||||
changed = True
|
||||
|
||||
except KeyVaultErrorException:
|
||||
# Key doesn't exist
|
||||
if self.state == 'present':
|
||||
changed = True
|
||||
|
||||
self.results['changed'] = changed
|
||||
self.results['state'] = results
|
||||
|
||||
if not self.check_mode:
|
||||
|
||||
# Create key
|
||||
if self.state == 'present' and changed:
|
||||
results['key_id'] = self.create_key(self.key_name, self.tags)
|
||||
self.results['state'] = results
|
||||
self.results['state']['status'] = 'Created'
|
||||
# Delete key
|
||||
elif self.state == 'absent' and changed:
|
||||
results['key_id'] = self.delete_key(self.key_name)
|
||||
self.results['state'] = results
|
||||
self.results['state']['status'] = 'Deleted'
|
||||
else:
|
||||
if self.state == 'present' and changed:
|
||||
self.results['state']['status'] = 'Created'
|
||||
elif self.state == 'absent' and changed:
|
||||
self.results['state']['status'] = 'Deleted'
|
||||
|
||||
return self.results
|
||||
|
||||
def get_keyvault_client(self):
|
||||
try:
|
||||
self.log("Get KeyVaultClient from MSI")
|
||||
credentials = MSIAuthentication(resource='https://vault.azure.net')
|
||||
return KeyVaultClient(credentials)
|
||||
except Exception:
|
||||
self.log("Get KeyVaultClient from service principal")
|
||||
|
||||
# Create KeyVault Client using KeyVault auth class and auth_callback
|
||||
def auth_callback(server, resource, scope):
|
||||
if self.credentials['client_id'] is None or self.credentials['secret'] is None:
|
||||
self.fail('Please specify client_id, secret and tenant to access azure Key Vault.')
|
||||
|
||||
tenant = self.credentials.get('tenant')
|
||||
if not self.credentials['tenant']:
|
||||
tenant = "common"
|
||||
|
||||
authcredential = ServicePrincipalCredentials(
|
||||
client_id=self.credentials['client_id'],
|
||||
secret=self.credentials['secret'],
|
||||
tenant=tenant,
|
||||
cloud_environment=self._cloud_environment,
|
||||
resource="https://vault.azure.net")
|
||||
|
||||
token = authcredential.token
|
||||
return token['token_type'], token['access_token']
|
||||
|
||||
return KeyVaultClient(KeyVaultAuthentication(auth_callback))
|
||||
|
||||
def get_key(self, name, version=''):
|
||||
''' Gets an existing key '''
|
||||
key_bundle = self.client.get_key(self.keyvault_uri, name, version)
|
||||
if key_bundle:
|
||||
key_id = KeyVaultId.parse_key_id(key_bundle.key.kid)
|
||||
return key_id.id
|
||||
|
||||
def create_key(self, name, tags, kty='RSA'):
|
||||
''' Creates a key '''
|
||||
key_bundle = self.client.create_key(vault_base_url=self.keyvault_uri, key_name=name, kty=kty, tags=tags)
|
||||
key_id = KeyVaultId.parse_key_id(key_bundle.key.kid)
|
||||
return key_id.id
|
||||
|
||||
def delete_key(self, name):
|
||||
''' Deletes a key '''
|
||||
deleted_key = self.client.delete_key(self.keyvault_uri, name)
|
||||
key_id = KeyVaultId.parse_key_id(deleted_key.key.kid)
|
||||
return key_id.id
|
||||
|
||||
def import_key(self, key_name, destination=None, key_ops=None, disabled=False, expires=None,
|
||||
not_before=None, tags=None, pem_file=None, pem_password=None, byok_file=None):
|
||||
""" Import a private key. Supports importing base64 encoded private keys from PEM files.
|
||||
Supports importing BYOK keys into HSM for premium KeyVaults. """
|
||||
|
||||
def _to_bytes(hex_string):
|
||||
# zero pads and decodes a hex string
|
||||
if len(hex_string) % 2:
|
||||
hex_string = '0{0}'.format(hex_string)
|
||||
return codecs.decode(hex_string, 'hex_codec')
|
||||
|
||||
def _set_rsa_parameters(dest, src):
|
||||
# map OpenSSL parameter names to JsonWebKey property names
|
||||
conversion_dict = {
|
||||
'modulus': 'n',
|
||||
'publicExponent': 'e',
|
||||
'privateExponent': 'd',
|
||||
'prime1': 'p',
|
||||
'prime2': 'q',
|
||||
'exponent1': 'dp',
|
||||
'exponent2': 'dq',
|
||||
'coefficient': 'qi'
|
||||
}
|
||||
# regex: looks for matches that fit the following patterns:
|
||||
# integerPattern: 65537 (0x10001)
|
||||
# hexPattern:
|
||||
# 00:a0:91:4d:00:23:4a:c6:83:b2:1b:4c:15:d5:be:
|
||||
# d8:87:bd:c9:59:c2:e5:7a:f5:4a:e7:34:e8:f0:07:
|
||||
# The desired match should always be the first component of the match
|
||||
regex = re.compile(r'([^:\s]*(:[^\:)]+\))|([^:\s]*(:\s*[0-9A-Fa-f]{2})+))')
|
||||
# regex2: extracts the hex string from a format like: 65537 (0x10001)
|
||||
regex2 = re.compile(r'(?<=\(0x{1})([0-9A-Fa-f]*)(?=\))')
|
||||
|
||||
key_params = crypto.dump_privatekey(crypto.FILETYPE_TEXT, src).decode('utf-8')
|
||||
for match in regex.findall(key_params):
|
||||
comps = match[0].split(':', 1)
|
||||
name = conversion_dict.get(comps[0], None)
|
||||
if name:
|
||||
value = comps[1].replace(' ', '').replace('\n', '').replace(':', '')
|
||||
try:
|
||||
value = _to_bytes(value)
|
||||
except Exception: # pylint:disable=broad-except
|
||||
# if decoding fails it is because of an integer pattern. Extract the hex
|
||||
# string and retry
|
||||
value = _to_bytes(regex2.findall(value)[0])
|
||||
setattr(dest, name, value)
|
||||
|
||||
key_attrs = KeyAttributes(not disabled, not_before, expires)
|
||||
key_obj = JsonWebKey(key_ops=key_ops)
|
||||
if pem_file:
|
||||
key_obj.kty = 'RSA'
|
||||
with open(pem_file, 'r') as f:
|
||||
pem_data = f.read()
|
||||
# load private key and prompt for password if encrypted
|
||||
try:
|
||||
pem_password = str(pem_password).encode() if pem_password else None
|
||||
# despite documentation saying password should be a string, it needs to actually
|
||||
# be UTF-8 encoded bytes
|
||||
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, pem_data, pem_password)
|
||||
except crypto.Error:
|
||||
pass # wrong password
|
||||
except TypeError:
|
||||
pass # no pass provided
|
||||
_set_rsa_parameters(key_obj, pkey)
|
||||
elif byok_file:
|
||||
with open(byok_file, 'rb') as f:
|
||||
byok_data = f.read()
|
||||
key_obj.kty = 'RSA-HSM'
|
||||
key_obj.t = byok_data
|
||||
|
||||
return self.client.import_key(
|
||||
self.keyvault_uri, key_name, key_obj, destination == 'hsm', key_attrs, tags)
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMKeyVaultKey()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,466 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_keyvaultkey_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure Key Vault key facts
|
||||
description:
|
||||
- Get facts of Azure Key Vault key.
|
||||
|
||||
options:
|
||||
vault_uri:
|
||||
description:
|
||||
- Vault uri where the key is stored.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Key name. If not set, will list all keys in I(vault_uri).
|
||||
type: str
|
||||
version:
|
||||
description:
|
||||
- Key version.
|
||||
- Set it to C(current) to show latest version of a key.
|
||||
- Set it to C(all) to list all versions of a key.
|
||||
- Set it to a specific version to list that version of a key, e.g. fd2682392a504455b79c90dd04a1bf46.
|
||||
default: current
|
||||
type: str
|
||||
show_deleted_key:
|
||||
description:
|
||||
- Set to C(true) to show deleted keys. Set to C(false) to show keys that have not been deleted.
|
||||
type: bool
|
||||
default: false
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yunge Zhu (@yungezz)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get latest version of specific key
|
||||
azure_rm_keyvaultkey_info:
|
||||
vault_uri: "https://myVault.vault.azure.net"
|
||||
name: myKey
|
||||
|
||||
- name: List all versions of specific key
|
||||
azure_rm_keyvaultkey_info:
|
||||
vault_uri: "https://myVault.vault.azure.net"
|
||||
name: myKey
|
||||
version: all
|
||||
|
||||
- name: List specific version of specific key
|
||||
azure_rm_keyvaultkey_info:
|
||||
vault_uri: "https://myVault.vault.azure.net"
|
||||
name: myKey
|
||||
version: fd2682392a504455b79c90dd04a1bf46
|
||||
|
||||
- name: List all keys in specific key vault
|
||||
azure_rm_keyvaultkey_info:
|
||||
vault_uri: "https://myVault.vault.azure.net"
|
||||
|
||||
- name: List deleted keys in specific key vault
|
||||
azure_rm_keyvaultkey_info:
|
||||
vault_uri: "https://myVault.vault.azure.net"
|
||||
show_deleted_key: True
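# Illustrative sketch (not part of the original examples): the documented tags
# option limits results to keys carrying a given tag; the tag is hypothetical.
- name: List keys with a specific tag
  azure_rm_keyvaultkey_info:
    vault_uri: "https://myVault.vault.azure.net"
    tags:
      - purpose:testing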
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
keys:
|
||||
description:
|
||||
- List of keys in Azure Key Vault.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
kid:
|
||||
description:
|
||||
- Key identifier.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "https://myVault.vault.azure.net/keys/key1/fd2682392a504455b79c90dd04a1bf46"
|
||||
permitted_operations:
|
||||
description:
|
||||
- Permitted operations on the key.
|
||||
type: list
|
||||
returned: always
|
||||
sample: encrypt
|
||||
type:
|
||||
description:
|
||||
- Key type.
|
||||
type: str
|
||||
returned: always
|
||||
sample: RSA
|
||||
version:
|
||||
description:
|
||||
- Key version.
|
||||
type: str
|
||||
returned: always
|
||||
sample: fd2682392a504455b79c90dd04a1bf46
|
||||
key:
|
||||
description:
|
||||
- Public part of a key.
|
||||
contains:
|
||||
n:
|
||||
description:
|
||||
- RSA modulus.
|
||||
type: str
|
||||
e:
|
||||
description:
|
||||
- RSA public exponent.
|
||||
type: str
|
||||
crv:
|
||||
description:
|
||||
- Elliptic curve name.
|
||||
type: str
|
||||
x:
|
||||
description:
|
||||
- X component of an EC public key.
|
||||
type: str
|
||||
y:
|
||||
description:
|
||||
- Y component of an EC public key.
|
||||
type: str
|
||||
managed:
|
||||
description:
|
||||
- C(True) if the key's lifetime is managed by key vault.
|
||||
type: bool
|
||||
sample: True
|
||||
tags:
|
||||
description:
|
||||
- Tags of the key.
|
||||
returned: always
|
||||
type: list
|
||||
sample: [foo, ]
|
||||
attributes:
|
||||
description:
|
||||
- Key attributes.
|
||||
contains:
|
||||
created:
|
||||
description:
|
||||
- Creation datetime.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "2019-04-25T07:26:49+00:00"
|
||||
not_before:
|
||||
description:
|
||||
- Not before datetime.
|
||||
type: str
|
||||
sample: "2019-04-25T07:26:49+00:00"
|
||||
expires:
|
||||
description:
|
||||
- Expiration datetime.
|
||||
type: str
|
||||
sample: "2019-04-25T07:26:49+00:00"
|
||||
updated:
|
||||
description:
|
||||
- Update datetime.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "2019-04-25T07:26:49+00:00"
|
||||
enabled:
|
||||
description:
|
||||
- Indicate whether the key is enabled.
|
||||
returned: always
|
||||
type: str
|
||||
sample: true
|
||||
recovery_level:
|
||||
description:
|
||||
- Reflects the deletion recovery level currently in effect for keys in the current vault.
|
||||
- If it contains C(Purgeable) the key can be permanently deleted by a privileged user.
|
||||
- Otherwise, only the system can purge the key, at the end of the retention interval.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Purgeable
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.keyvault import KeyVaultClient, KeyVaultId, KeyVaultAuthentication, KeyId
|
||||
from azure.keyvault.models import KeyAttributes, JsonWebKey
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
from azure.keyvault.models.key_vault_error import KeyVaultErrorException
|
||||
from msrestazure.azure_active_directory import MSIAuthentication
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
def keybundle_to_dict(bundle):
|
||||
return dict(
|
||||
tags=bundle.tags,
|
||||
managed=bundle.managed,
|
||||
attributes=dict(
|
||||
enabled=bundle.attributes.enabled,
|
||||
not_before=bundle.attributes.not_before,
|
||||
expires=bundle.attributes.expires,
|
||||
created=bundle.attributes.created,
|
||||
updated=bundle.attributes.updated,
|
||||
recovery_level=bundle.attributes.recovery_level
|
||||
),
|
||||
kid=bundle.key.kid,
|
||||
version=KeyVaultId.parse_key_id(bundle.key.kid).version,
|
||||
type=bundle.key.kty,
|
||||
permitted_operations=bundle.key.key_ops,
|
||||
key=dict(
|
||||
n=bundle.key.n if hasattr(bundle.key, 'n') else None,
|
||||
e=bundle.key.e if hasattr(bundle.key, 'e') else None,
|
||||
crv=bundle.key.crv if hasattr(bundle.key, 'crv') else None,
|
||||
x=bundle.key.x if hasattr(bundle.key, 'x') else None,
|
||||
y=bundle.key.y if hasattr(bundle.key, 'y') else None
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def deletedkeybundle_to_dict(bundle):
|
||||
keybundle = keybundle_to_dict(bundle)
|
||||
keybundle['recovery_id'] = bundle.recovery_id
|
||||
keybundle['scheduled_purge_date'] = bundle.scheduled_purge_date
|
||||
keybundle['deleted_date'] = bundle.deleted_date
|
||||
return keybundle
|
||||
|
||||
|
||||
def keyitem_to_dict(keyitem):
|
||||
return dict(
|
||||
kid=keyitem.kid,
|
||||
version=KeyVaultId.parse_key_id(keyitem.kid).version,
|
||||
tags=keyitem.tags,
|
||||
managed=keyitem.managed,
|
||||
attributes=dict(
|
||||
enabled=keyitem.attributes.enabled,
|
||||
not_before=keyitem.attributes.not_before,
|
||||
expires=keyitem.attributes.expires,
|
||||
created=keyitem.attributes.created,
|
||||
updated=keyitem.attributes.updated,
|
||||
recovery_level=keyitem.attributes.recovery_level
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def deletedkeyitem_to_dict(keyitem):
|
||||
item = keyitem_to_dict(keyitem)
|
||||
item['recovery_id'] = keyitem.recovery_id
|
||||
item['scheduled_purge_date'] = keyitem.scheduled_purge_date
|
||||
item['deleted_date'] = keyitem.deleted_date
|
||||
return item
|
||||
|
||||
|
||||
class AzureRMKeyVaultKeyInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
version=dict(type='str', default='current'),
|
||||
name=dict(type='str'),
|
||||
vault_uri=dict(type='str', required=True),
|
||||
show_deleted_key=dict(type='bool', default=False),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.vault_uri = None
|
||||
self.name = None
|
||||
self.version = None
|
||||
self.show_deleted_key = False
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self._client = None
|
||||
|
||||
super(AzureRMKeyVaultKeyInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=False,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self._client = self.get_keyvault_client()
|
||||
|
||||
if self.name:
|
||||
if self.show_deleted_key:
|
||||
self.results['keys'] = self.get_deleted_key()
|
||||
else:
|
||||
if self.version == 'all':
|
||||
self.results['keys'] = self.get_key_versions()
|
||||
else:
|
||||
self.results['keys'] = self.get_key()
|
||||
else:
|
||||
if self.show_deleted_key:
|
||||
self.results['keys'] = self.list_deleted_keys()
|
||||
else:
|
||||
self.results['keys'] = self.list_keys()
|
||||
|
||||
return self.results
|
||||
|
||||
def get_keyvault_client(self):
|
||||
try:
|
||||
self.log("Get KeyVaultClient from MSI")
|
||||
credentials = MSIAuthentication(resource='https://vault.azure.net')
|
||||
return KeyVaultClient(credentials)
|
||||
except Exception:
|
||||
self.log("Get KeyVaultClient from service principal")
|
||||
|
||||
# Create KeyVault Client using KeyVault auth class and auth_callback
|
||||
def auth_callback(server, resource, scope):
|
||||
if self.credentials['client_id'] is None or self.credentials['secret'] is None:
|
||||
self.fail('Please specify client_id, secret and tenant to access azure Key Vault.')
|
||||
|
||||
tenant = self.credentials.get('tenant')
|
||||
if not self.credentials['tenant']:
|
||||
tenant = "common"
|
||||
|
||||
authcredential = ServicePrincipalCredentials(
|
||||
client_id=self.credentials['client_id'],
|
||||
secret=self.credentials['secret'],
|
||||
tenant=tenant,
|
||||
cloud_environment=self._cloud_environment,
|
||||
resource="https://vault.azure.net")
|
||||
|
||||
token = authcredential.token
|
||||
return token['token_type'], token['access_token']
|
||||
|
||||
return KeyVaultClient(KeyVaultAuthentication(auth_callback))
|
||||
|
||||
def get_key(self):
|
||||
'''
|
||||
Gets the properties of the specified key in key vault.
|
||||
|
||||
:return: deserialized key state dictionary
|
||||
'''
|
||||
self.log("Get the key {0}".format(self.name))
|
||||
|
||||
results = []
|
||||
try:
|
||||
if self.version == 'current':
|
||||
response = self._client.get_key(vault_base_url=self.vault_uri,
|
||||
key_name=self.name,
|
||||
key_version='')
|
||||
else:
|
||||
response = self._client.get_key(vault_base_url=self.vault_uri,
|
||||
key_name=self.name,
|
||||
key_version=self.version)
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
self.log("Response : {0}".format(response))
|
||||
results.append(keybundle_to_dict(response))
|
||||
|
||||
except KeyVaultErrorException as e:
|
||||
self.log("Did not find the key vault key {0}: {1}".format(self.name, str(e)))
|
||||
return results
|
||||
|
||||
def get_key_versions(self):
|
||||
'''
|
||||
Lists keys versions.
|
||||
|
||||
:return: deserialized versions of the key, including key identifier, attributes and tags
|
||||
'''
|
||||
self.log("Get the key versions {0}".format(self.name))
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = self._client.get_key_versions(vault_base_url=self.vault_uri,
|
||||
key_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(keyitem_to_dict(item))
|
||||
except KeyVaultErrorException as e:
|
||||
self.log("Did not find key versions {0} : {1}.".format(self.name, str(e)))
|
||||
return results
|
||||
|
||||
def list_keys(self):
|
||||
'''
|
||||
Lists keys in specific key vault.
|
||||
|
||||
:return: deserialized keys, including key identifier, attributes and tags.
|
||||
'''
|
||||
self.log("Get the key vaults in current subscription")
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = self._client.get_keys(vault_base_url=self.vault_uri)
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(keyitem_to_dict(item))
|
||||
except KeyVaultErrorException as e:
|
||||
self.log("Did not find key vault in current subscription {0}.".format(str(e)))
|
||||
return results
|
||||
|
||||
def get_deleted_key(self):
|
||||
'''
|
||||
Gets the properties of the specified deleted key in key vault.
|
||||
|
||||
:return: deserialized key state dictionary
|
||||
'''
|
||||
self.log("Get the key {0}".format(self.name))
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = self._client.get_deleted_key(vault_base_url=self.vault_uri,
|
||||
key_name=self.name)
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
self.log("Response : {0}".format(response))
|
||||
results.append(deletedkeybundle_to_dict(response))
|
||||
|
||||
except KeyVaultErrorException as e:
|
||||
self.log("Did not find the key vault key {0}: {1}".format(self.name, str(e)))
|
||||
return results
|
||||
|
||||
def list_deleted_keys(self):
|
||||
'''
|
||||
Lists deleted keys in specific key vault.
|
||||
|
||||
:return: deserialized deleted keys, including key identifier, attributes and tags.
|
||||
'''
|
||||
self.log("Get the key vaults in current subscription")
|
||||
|
||||
results = []
|
||||
try:
|
||||
response = self._client.get_deleted_keys(vault_base_url=self.vault_uri)
|
||||
self.log("Response : {0}".format(response))
|
||||
|
||||
if response:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(deletedkeyitem_to_dict(item))
|
||||
except KeyVaultErrorException as e:
|
||||
self.log("Did not find key vault in current subscription {0}.".format(str(e)))
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMKeyVaultKeyInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,231 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright: Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_keyvaultsecret
|
||||
version_added: 2.5
|
||||
short_description: Use Azure KeyVault Secrets
|
||||
description:
|
||||
- Create or delete a secret within a given keyvault.
|
||||
- By using Key Vault, you can encrypt keys and secrets.
|
||||
- These include authentication keys, storage account keys, data encryption keys, .PFX files, and passwords.
|
||||
options:
|
||||
keyvault_uri:
|
||||
description:
|
||||
- URI of the keyvault endpoint.
|
||||
required: true
|
||||
secret_name:
|
||||
description:
|
||||
- Name of the keyvault secret.
|
||||
required: true
|
||||
secret_value:
|
||||
description:
|
||||
- Secret to be secured by keyvault.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the secret. Use C(present) to create or update a secret and C(absent) to delete a secret.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Ian Philpot (@iphilpot)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a secret
|
||||
azure_rm_keyvaultsecret:
|
||||
secret_name: MySecret
|
||||
secret_value: My_Pass_Sec
|
||||
keyvault_uri: https://contoso.vault.azure.net/
|
||||
tags:
|
||||
testing: testing
|
||||
delete: never
|
||||
|
||||
- name: Delete a secret
|
||||
azure_rm_keyvaultsecret:
|
||||
secret_name: MySecret
|
||||
keyvault_uri: https://contoso.vault.azure.net/
|
||||
state: absent
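# Illustrative sketch (not part of the original examples): re-running the module
# with a different secret_value updates the existing secret in place
# (state defaults to present); the value shown is hypothetical.
- name: Update an existing secret
  azure_rm_keyvaultsecret:
    secret_name: MySecret
    secret_value: New_Pass_Sec
    keyvault_uri: https://contoso.vault.azure.net/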
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
state:
|
||||
description:
|
||||
- Current state of the secret.
|
||||
returned: success
|
||||
type: complex
|
||||
contains:
|
||||
secret_id:
|
||||
description:
|
||||
- Secret resource path.
|
||||
type: str
|
||||
sample: https://contoso.vault.azure.net/secrets/hello/e924f053839f4431b35bc54393f98423
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication, KeyVaultId
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
from azure.keyvault.models.key_vault_error import KeyVaultErrorException
|
||||
from msrestazure.azure_active_directory import MSIAuthentication
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMKeyVaultSecret(AzureRMModuleBase):
|
||||
''' Module that creates or deletes secrets in Azure KeyVault '''
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
secret_name=dict(type='str', required=True),
|
||||
secret_value=dict(type='str', no_log=True),
|
||||
keyvault_uri=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent'])
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['secret_value'])
|
||||
]
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
state=dict()
|
||||
)
|
||||
|
||||
self.secret_name = None
|
||||
self.secret_value = None
|
||||
self.keyvault_uri = None
|
||||
self.state = None
|
||||
self.data_creds = None
|
||||
self.client = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMKeyVaultSecret, self).__init__(self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
# Create KeyVault Client
|
||||
self.client = self.get_keyvault_client()
|
||||
|
||||
results = dict()
|
||||
changed = False
|
||||
|
||||
try:
|
||||
results = self.get_secret(self.secret_name)
|
||||
|
||||
# Secret exists and will be deleted
|
||||
if self.state == 'absent':
|
||||
changed = True
|
||||
elif self.secret_value and results['secret_value'] != self.secret_value:
|
||||
changed = True
|
||||
|
||||
except KeyVaultErrorException:
|
||||
# Secret doesn't exist
|
||||
if self.state == 'present':
|
||||
changed = True
|
||||
|
||||
self.results['changed'] = changed
|
||||
self.results['state'] = results
|
||||
|
||||
if not self.check_mode:
|
||||
# Create secret
|
||||
if self.state == 'present' and changed:
|
||||
results['secret_id'] = self.create_update_secret(self.secret_name, self.secret_value, self.tags)
|
||||
self.results['state'] = results
|
||||
self.results['state']['status'] = 'Created'
|
||||
# Delete secret
|
||||
elif self.state == 'absent' and changed:
|
||||
results['secret_id'] = self.delete_secret(self.secret_name)
|
||||
self.results['state'] = results
|
||||
self.results['state']['status'] = 'Deleted'
|
||||
else:
|
||||
if self.state == 'present' and changed:
|
||||
self.results['state']['status'] = 'Created'
|
||||
elif self.state == 'absent' and changed:
|
||||
self.results['state']['status'] = 'Deleted'
|
||||
|
||||
return self.results
|
||||
|
||||
def get_keyvault_client(self):
|
||||
try:
|
||||
self.log("Get KeyVaultClient from MSI")
|
||||
credentials = MSIAuthentication(resource='https://vault.azure.net')
|
||||
return KeyVaultClient(credentials)
|
||||
except Exception:
|
||||
self.log("Get KeyVaultClient from service principal")
|
||||
|
||||
# Create KeyVault Client using KeyVault auth class and auth_callback
|
||||
def auth_callback(server, resource, scope):
|
||||
if self.credentials['client_id'] is None or self.credentials['secret'] is None:
|
||||
self.fail('Please specify client_id, secret and tenant to access azure Key Vault.')
|
||||
|
||||
tenant = self.credentials.get('tenant')
|
||||
if not self.credentials['tenant']:
|
||||
tenant = "common"
|
||||
|
||||
authcredential = ServicePrincipalCredentials(
|
||||
client_id=self.credentials['client_id'],
|
||||
secret=self.credentials['secret'],
|
||||
tenant=tenant,
|
||||
cloud_environment=self._cloud_environment,
|
||||
resource="https://vault.azure.net")
|
||||
|
||||
token = authcredential.token
|
||||
return token['token_type'], token['access_token']
|
||||
|
||||
return KeyVaultClient(KeyVaultAuthentication(auth_callback))
|
||||
|
||||
def get_secret(self, name, version=''):
|
||||
''' Gets an existing secret '''
|
||||
secret_bundle = self.client.get_secret(self.keyvault_uri, name, version)
|
||||
if secret_bundle:
|
||||
secret_id = KeyVaultId.parse_secret_id(secret_bundle.id)
|
||||
return dict(secret_id=secret_id.id, secret_value=secret_bundle.value)
|
||||
return None
|
||||
|
||||
def create_update_secret(self, name, secret, tags):
|
||||
''' Creates/Updates a secret '''
|
||||
secret_bundle = self.client.set_secret(self.keyvault_uri, name, secret, tags)
|
||||
secret_id = KeyVaultId.parse_secret_id(secret_bundle.id)
|
||||
return secret_id.id
|
||||
|
||||
def delete_secret(self, name):
|
||||
''' Deletes a secret '''
|
||||
deleted_secret = self.client.delete_secret(self.keyvault_uri, name)
|
||||
secret_id = KeyVaultId.parse_secret_id(deleted_secret.id)
|
||||
return secret_id.id
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMKeyVaultSecret()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
File diff suppressed because it is too large
@ -1,177 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_loadbalancer_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get load balancer facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific load balancer or all load balancers.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Limit results to a specific load balancer.
|
||||
resource_group:
|
||||
description:
|
||||
- The resource group to search for the desired load balancer.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Thomas Stringer (@trstringer)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts for one load balancer
|
||||
azure_rm_loadbalancer_info:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts for all load balancers
|
||||
azure_rm_loadbalancer_info:
|
||||
|
||||
- name: Get facts for all load balancers in a specific resource group
|
||||
azure_rm_loadbalancer_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts by tags
|
||||
azure_rm_loadbalancer_info:
|
||||
tags:
|
||||
- testing
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
azure_loadbalancers:
|
||||
description:
|
||||
- List of load balancer dicts.
|
||||
returned: always
|
||||
type: list
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.common import AzureHttpError
|
||||
except Exception:
|
||||
# handled in azure_rm_common
|
||||
pass
|
||||
|
||||
AZURE_OBJECT_CLASS = 'LoadBalancer'
|
||||
|
||||
|
||||
class AzureRMLoadBalancerInfo(AzureRMModuleBase):
|
||||
"""Utility class to get load balancer facts"""
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_args = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
tags=dict(type='list')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
ansible_info=dict(
|
||||
azure_loadbalancers=[]
|
||||
)
|
||||
)
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.tags = None
|
||||
|
||||
super(AzureRMLoadBalancerInfo, self).__init__(
|
||||
derived_arg_spec=self.module_args,
|
||||
supports_tags=False,
|
||||
facts_module=True
|
||||
)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_loadbalancer_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_loadbalancer_facts' module has been renamed to 'azure_rm_loadbalancer_info'", version='2.13')
|
||||
|
||||
for key in self.module_args:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.results['ansible_info']['azure_loadbalancers'] = (
|
||||
self.get_item() if self.name
|
||||
else self.list_items()
|
||||
)
|
||||
|
||||
return self.results
|
||||
|
||||
def get_item(self):
|
||||
"""Get a single load balancer"""
|
||||
|
||||
self.log('Get properties for {0}'.format(self.name))
|
||||
|
||||
item = None
|
||||
result = []
|
||||
|
||||
try:
|
||||
item = self.network_client.load_balancers.get(self.resource_group, self.name)
|
||||
except CloudError:
|
||||
pass
|
||||
|
||||
if item and self.has_tags(item.tags, self.tags):
|
||||
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
|
||||
|
||||
return result
|
||||
|
||||
def list_items(self):
|
||||
"""Get all load balancers"""
|
||||
|
||||
self.log('List all load balancers')
|
||||
|
||||
if self.resource_group:
|
||||
try:
|
||||
response = self.network_client.load_balancers.list(self.resource_group)
|
||||
except AzureHttpError as exc:
|
||||
self.fail('Failed to list items in resource group {0} - {1}'.format(self.resource_group, str(exc)))
|
||||
else:
|
||||
try:
|
||||
response = self.network_client.load_balancers.list_all()
|
||||
except AzureHttpError as exc:
|
||||
self.fail('Failed to list all items - {0}'.format(str(exc)))
|
||||
|
||||
results = []
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
"""Main module execution code path"""
|
||||
|
||||
AzureRMLoadBalancerInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,216 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_lock
|
||||
version_added: "2.9"
|
||||
short_description: Manage Azure locks
|
||||
description:
|
||||
- Create or delete an Azure lock.
|
||||
- To create or delete management locks, you must have access to Microsoft.Authorization/* or Microsoft.Authorization/locks/* actions.
|
||||
- Of the built-in roles, only Owner and User Access Administrator are granted those actions.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the lock.
|
||||
type: str
|
||||
required: true
|
||||
managed_resource_id:
|
||||
description:
|
||||
- Manage a lock for the specified resource ID.
|
||||
- Mutually exclusive with I(resource_group).
|
||||
- If neither I(managed_resource_id) nor I(resource_group) is specified, manage a lock for the current subscription.
|
||||
- "'/subscriptions/{subscriptionId}' for subscriptions."
|
||||
- "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups."
|
||||
- "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources."
|
||||
type: str
|
||||
resource_group:
|
||||
description:
|
||||
- Manage a lock for the named resource group.
|
||||
- Mutually exclusive with I(managed_resource_id).
|
||||
- If neither I(managed_resource_id) nor I(resource_group) is specified, manage a lock for the current subscription.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- State of the lock.
|
||||
- Use C(present) to create or update a lock and C(absent) to delete a lock.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
level:
|
||||
description:
|
||||
- The lock level type.
|
||||
type: str
|
||||
choices:
|
||||
- can_not_delete
|
||||
- read_only
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a lock for a resource
|
||||
azure_rm_lock:
|
||||
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
|
||||
name: myLock
|
||||
level: read_only
|
||||
|
||||
- name: Create a lock for a resource group using its resource ID
|
||||
azure_rm_lock:
|
||||
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup
|
||||
name: myLock
|
||||
level: read_only
|
||||
|
||||
- name: Create a lock for a resource group using its name
|
||||
azure_rm_lock:
|
||||
resource_group: myResourceGroup
|
||||
name: myLock
|
||||
level: read_only
|
||||
|
||||
- name: Create a lock for a subscription
|
||||
azure_rm_lock:
|
||||
name: myLock
|
||||
level: read_only
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID of the lock.
|
||||
returned: success
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/keep"
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMLock(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
resource_group=dict(type='str'),
|
||||
managed_resource_id=dict(type='str'),
|
||||
level=dict(type='str', choices=['can_not_delete', 'read_only'])
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['level'])
|
||||
]
|
||||
|
||||
mutually_exclusive = [['resource_group', 'managed_resource_id']]
|
||||
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.level = None
|
||||
self.resource_group = None
|
||||
self.managed_resource_id = None
|
||||
|
||||
super(AzureRMLock, self).__init__(self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if,
|
||||
mutually_exclusive=mutually_exclusive,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in self.module_arg_spec.keys():
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
changed = False
|
||||
# construct scope id
|
||||
scope = self.get_scope()
|
||||
lock = self.get_lock(scope)
|
||||
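# A missing lock is created and an existing lock whose level differs is updated;
# in check mode the change is only reported and no API call is made.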
if self.state == 'present':
|
||||
lock_level = getattr(self.lock_models.LockLevel, self.level)
|
||||
if not lock:
|
||||
changed = True
|
||||
lock = self.lock_models.ManagementLockObject(level=lock_level)
|
||||
elif lock.level != lock_level:
|
||||
self.log('Lock level changed')
|
||||
lock.level = lock_level
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
lock = self.create_or_update_lock(scope, lock)
|
||||
self.results['id'] = lock.id
|
||||
elif lock:
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
self.delete_lock(scope)
|
||||
self.results['changed'] = changed
|
||||
return self.results
|
||||
|
||||
def delete_lock(self, scope):
|
||||
try:
|
||||
return self.lock_client.management_locks.delete_by_scope(scope, self.name)
|
||||
except CloudError as exc:
|
||||
self.fail('Error when deleting lock {0} for {1}: {2}'.format(self.name, scope, exc.message))
|
||||
|
||||
def create_or_update_lock(self, scope, lock):
|
||||
try:
|
||||
return self.lock_client.management_locks.create_or_update_by_scope(scope, self.name, lock)
|
||||
except CloudError as exc:
|
||||
self.fail('Error when creating or updating lock {0} for {1}: {2}'.format(self.name, scope, exc.message))
|
||||
|
||||
def get_lock(self, scope):
|
||||
try:
|
||||
return self.lock_client.management_locks.get_by_scope(scope, self.name)
|
||||
except CloudError as exc:
|
||||
if exc.status_code in [404]:
|
||||
return None
|
||||
self.fail('Error when getting lock {0} for {1}: {2}'.format(self.name, scope, exc.message))
|
||||
|
||||
def get_scope(self):
|
||||
'''
|
||||
Get the resource scope of the lock management.
|
||||
'/subscriptions/{subscriptionId}' for subscriptions,
|
||||
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
|
||||
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
|
||||
'''
|
||||
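# Illustrative only (hypothetical values): resource_group='myResourceGroup' under
# subscription '00000000-0000-0000-0000-000000000000' yields
# '/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myResourceGroup'.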
if self.managed_resource_id:
|
||||
return self.managed_resource_id
|
||||
elif self.resource_group:
|
||||
return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
|
||||
else:
|
||||
return '/subscriptions/{0}'.format(self.subscription_id)
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMLock()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,223 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_lock_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure lock facts
|
||||
description:
|
||||
- Get facts of Azure locks at subscription, resource group, or resource scope.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the lock.
|
||||
type: str
|
||||
required: true
|
||||
managed_resource_id:
|
||||
description:
|
||||
- ID of the resource whose locks to query.
|
||||
- Can be obtained from the corresponding facts (info) module.
|
||||
- Mutually exclusive with I(resource_group).
|
||||
- Queries the subscription if neither I(managed_resource_id) nor I(resource_group) is defined.
|
||||
- "'/subscriptions/{subscriptionId}' for subscriptions."
|
||||
- "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups."
|
||||
- "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources."
|
||||
- Returns all locks at this scope and its child scopes; use the I(managed_resource_id) values in the response for further management.
|
||||
type: str
|
||||
resource_group:
|
||||
description:
|
||||
- Name of the resource group whose locks to query.
|
||||
- The lock is at the resource group level.
|
||||
- Mutually exclusive with I(managed_resource_id).
|
||||
- Queries the subscription if neither I(managed_resource_id) nor I(resource_group) is defined.
|
||||
- Returns all locks at this resource group scope and its child scopes; use the I(managed_resource_id) values in the response for further management.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get myLock details of myVM
|
||||
azure_rm_lock_info:
|
||||
name: myLock
|
||||
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
|
||||
|
||||
- name: List locks of myVM
|
||||
azure_rm_lock_info:
|
||||
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
|
||||
|
||||
- name: List locks of myResourceGroup
|
||||
azure_rm_lock_info:
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: List locks of myResourceGroup
|
||||
azure_rm_lock_info:
|
||||
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup
|
||||
|
||||
- name: List locks of mySubscription
|
||||
azure_rm_lock_info:
|
||||
|
||||
- name: List locks of mySubscription
|
||||
azure_rm_lock_info:
|
||||
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
locks:
|
||||
description:
|
||||
- List of locks dicts.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- ID of the Lock.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/myLock"
|
||||
name:
|
||||
description:
|
||||
- Name of the lock.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myLock
|
||||
level:
|
||||
description:
|
||||
- Level of the lock.
|
||||
returned: always
|
||||
type: str
|
||||
sample: can_not_delete
|
||||
notes:
|
||||
description:
|
||||
- Notes added by the lock's creator.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "This is a lock"
|
||||
''' # NOQA
|
||||
|
||||
import json
|
||||
import re
|
||||
from ansible.module_utils.common.dict_transformations import _camel_to_snake
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMLockInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(type='str'),
|
||||
resource_group=dict(type='str'),
|
||||
managed_resource_id=dict(type='str')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
locks=[]
|
||||
)
|
||||
|
||||
mutually_exclusive = [['resource_group', 'managed_resource_id']]
|
||||
|
||||
self.name = None
|
||||
self.resource_group = None
|
||||
self.managed_resource_id = None
|
||||
self._mgmt_client = None
|
||||
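# Sent verbatim with the raw REST calls issued by list_locks(); the api-version pins
# the Microsoft.Authorization locks endpoint version.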
self._query_parameters = {'api-version': '2016-09-01'}
|
||||
self._header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
|
||||
|
||||
super(AzureRMLockInfo, self).__init__(self.module_arg_spec, facts_module=True, mutually_exclusive=mutually_exclusive, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_lock_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_lock_facts' module has been renamed to 'azure_rm_lock_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec.keys():
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self._mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
changed = False
|
||||
# construct scope id
|
||||
scope = self.get_scope()
|
||||
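# The locks endpoint lives under the computed scope; appending a name targets a single
# lock, otherwise the request lists every lock visible at that scope.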
url = '/{0}/providers/Microsoft.Authorization/locks'.format(scope)
|
||||
if self.name:
|
||||
url = '{0}/{1}'.format(url, self.name)
|
||||
locks = self.list_locks(url)
|
||||
resp = locks.get('value') if 'value' in locks else [locks]
|
||||
self.results['locks'] = [self.to_dict(x) for x in resp]
|
||||
return self.results
|
||||
|
||||
def to_dict(self, lock):
|
||||
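# managed_resource_id is recovered by stripping the trailing
# '/providers/Microsoft.Authorization/locks/<name>' segment from the lock's own ID,
# leaving the ID of the resource the lock applies to.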
resp = dict(
|
||||
id=lock['id'],
|
||||
name=lock['name'],
|
||||
level=_camel_to_snake(lock['properties']['level']),
|
||||
managed_resource_id=re.sub('/providers/Microsoft.Authorization/locks/.+', '', lock['id'])
|
||||
)
|
||||
if lock['properties'].get('notes'):
|
||||
resp['notes'] = lock['properties']['notes']
|
||||
if lock['properties'].get('owners'):
|
||||
resp['owners'] = [x['application_id'] for x in lock['properties']['owners']]
|
||||
return resp
|
||||
|
||||
def list_locks(self, url):
|
||||
try:
|
||||
resp = self._mgmt_client.query(url=url,
|
||||
method='GET',
|
||||
query_parameters=self._query_parameters,
|
||||
header_parameters=self._header_parameters,
|
||||
body=None,
|
||||
expected_status_codes=[200],
|
||||
polling_timeout=None,
|
||||
polling_interval=None)
|
||||
return json.loads(resp.text)
|
||||
except CloudError as exc:
|
||||
self.fail('Error when finding locks {0}: {1}'.format(url, exc.message))
|
||||
|
||||
def get_scope(self):
|
||||
'''
|
||||
Get the resource scope of the lock management.
|
||||
'/subscriptions/{subscriptionId}' for subscriptions,
|
||||
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
|
||||
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
|
||||
'''
|
||||
if self.managed_resource_id:
|
||||
return self.managed_resource_id
|
||||
elif self.resource_group:
|
||||
return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
|
||||
else:
|
||||
return '/subscriptions/{0}'.format(self.subscription_id)
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMLockInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,321 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_loganalyticsworkspace
|
||||
version_added: "2.8"
|
||||
short_description: Manage Azure Log Analytics workspaces
|
||||
description:
|
||||
- Create or delete Azure Log Analytics workspaces.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the workspace.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the workspace. Use C(present) to create or update a workspace and C(absent) to delete a workspace.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
sku:
|
||||
description:
|
||||
- The SKU of the workspace.
|
||||
choices:
|
||||
- free
|
||||
- standard
|
||||
- premium
|
||||
- unlimited
|
||||
- per_node
|
||||
- per_gb2018
|
||||
- standalone
|
||||
default: per_gb2018
|
||||
retention_in_days:
|
||||
description:
|
||||
- The workspace data retention in days.
|
||||
- -1 means Unlimited retention for I(sku=unlimited).
|
||||
- 730 days is the maximum allowed for all other SKUs.
|
||||
intelligence_packs:
|
||||
description:
|
||||
- Manage the intelligence packs available for this workspace.
|
||||
- Enable one pack by setting it to C(true). For example "Backup:true".
|
||||
- Disable one pack by setting it to C(false). For example "Backup:false".
|
||||
- Intelligence packs not listed in this property will not be changed.
|
||||
type: dict
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a workspace with backup enabled
|
||||
azure_rm_loganalyticsworkspace:
|
||||
resource_group: myResourceGroup
|
||||
name: myLogAnalyticsWorkspace
|
||||
intelligence_packs:
|
||||
Backup: true
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Workspace resource path.
|
||||
type: str
|
||||
returned: success
|
||||
example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.OperationalInsights/workspaces/m
|
||||
yLogAnalyticsWorkspace"
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
type: str
|
||||
returned: success
|
||||
example: eastus
|
||||
sku:
|
||||
description:
|
||||
- The SKU of the workspace.
|
||||
type: str
|
||||
returned: success
|
||||
example: "per_gb2018"
|
||||
retention_in_days:
|
||||
description:
|
||||
- The workspace data retention in days.
|
||||
- -1 means Unlimited retention for I(sku=unlimited).
|
||||
- 730 days is the maximum allowed for all other SKUs.
|
||||
type: int
|
||||
returned: success
|
||||
example: 40
|
||||
intelligence_packs:
|
||||
description:
|
||||
- Lists all the intelligence packs possible and whether they are enabled or disabled for a given workspace.
|
||||
type: list
|
||||
returned: success
|
||||
example: [{'name': 'CapacityPerformance', 'enabled': true}]
|
||||
management_groups:
|
||||
description:
|
||||
- Management groups connected to the workspace.
|
||||
type: dict
|
||||
returned: success
|
||||
example: {'value': []}
|
||||
shared_keys:
|
||||
description:
|
||||
- Shared keys for the workspace.
|
||||
type: dict
|
||||
returned: success
|
||||
example: {
|
||||
'primarySharedKey': 'BozLY1JnZbxu0jWUQSY8iRPEM8ObmpP8rW+8bUl3+HpDJI+n689SxXgTgU7k1qdxo/WugRLxechxbolAfHM5uA==',
|
||||
'secondarySharedKey': '7tDt5W0JBrCQKtQA3igfFltLSzJeyr9LmuT+B/ibzd8cdC1neZ1ePOQLBx5NUzc0q2VUIK0cLhWNyFvo/hT8Ww=='
|
||||
}
|
||||
usages:
|
||||
description:
|
||||
- Usage metrics for the workspace.
|
||||
type: dict
|
||||
returned: success
|
||||
example: {
|
||||
'value': [
|
||||
{
|
||||
'name': {
|
||||
'value': 'DataAnalyzed',
|
||||
'localizedValue': 'Data Analyzed'
|
||||
},
|
||||
'unit': 'Bytes',
|
||||
'currentValue': 0,
|
||||
'limit': 524288000,
|
||||
'nextResetTime': '2017-10-03T00:00:00Z',
|
||||
'quotaPeriod': 'P1D'
|
||||
}
|
||||
]
|
||||
}
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMLogAnalyticsWorkspace(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
location=dict(type='str'),
|
||||
sku=dict(type='str', default='per_gb2018', choices=['free', 'standard', 'premium', 'unlimited', 'per_node', 'per_gb2018', 'standalone']),
|
||||
retention_in_days=dict(type='int'),
|
||||
intelligence_packs=dict(type='dict')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.state = None
|
||||
self.location = None
|
||||
self.sku = None
|
||||
self.retention_in_days = None
|
||||
self.intelligence_packs = None
|
||||
|
||||
super(AzureRMLogAnalyticsWorkspace, self).__init__(self.module_arg_spec, supports_check_mode=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
self.results = dict()
|
||||
changed = False
|
||||
|
||||
if not self.location:
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
self.location = resource_group.location
|
||||
|
||||
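# 'per_gb2018' is special-cased because a generic snake-to-camel conversion cannot
# produce the 'GB' capitalization the service expects ('PerGB2018'); the remaining SKU
# names go through the generic conversion.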
if self.sku == 'per_gb2018':
|
||||
self.sku = 'PerGB2018'
|
||||
else:
|
||||
self.sku = _snake_to_camel(self.sku)
|
||||
workspace = self.get_workspace()
|
||||
if not workspace and self.state == 'present':
|
||||
changed = True
|
||||
workspace = self.log_analytics_models.Workspace(sku=self.log_analytics_models.Sku(name=self.sku),
|
||||
retention_in_days=self.retention_in_days,
|
||||
location=self.location)
|
||||
if not self.check_mode:
|
||||
workspace = self.create_workspace(workspace)
|
||||
elif workspace and self.state == 'absent':
|
||||
changed = True
|
||||
workspace = None
|
||||
if not self.check_mode:
|
||||
self.delete_workspace()
|
||||
if workspace and workspace.id:
|
||||
self.results = self.to_dict(workspace)
|
||||
self.results['intelligence_packs'] = self.list_intelligence_packs()
|
||||
self.results['management_groups'] = self.list_management_groups()
|
||||
self.results['usages'] = self.list_usages()
|
||||
self.results['shared_keys'] = self.get_shared_keys()
|
||||
# handle the intelligence pack
|
||||
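# Only packs whose requested state differs from the current one trigger an
# enable/disable call; packs not named in intelligence_packs are left untouched.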
if workspace and workspace.id and self.intelligence_packs:
|
||||
intelligence_packs = self.results['intelligence_packs']
|
||||
for key in self.intelligence_packs.keys():
|
||||
enabled = self.intelligence_packs[key]
|
||||
for x in intelligence_packs:
|
||||
if x['name'].lower() == key.lower():
|
||||
if x['enabled'] != enabled:
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
self.change_intelligence(x['name'], enabled)
|
||||
x['enabled'] = enabled
|
||||
break
|
||||
self.results['changed'] = changed
|
||||
return self.results
|
||||
|
||||
def create_workspace(self, workspace):
|
||||
try:
|
||||
poller = self.log_analytics_client.workspaces.create_or_update(self.resource_group, self.name, workspace)
|
||||
return self.get_poller_result(poller)
|
||||
except CloudError as exc:
|
||||
self.fail('Error when creating workspace {0} - {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def get_workspace(self):
|
||||
try:
|
||||
return self.log_analytics_client.workspaces.get(self.resource_group, self.name)
|
||||
except CloudError:
|
||||
pass
|
||||
|
||||
def delete_workspace(self):
|
||||
try:
|
||||
self.log_analytics_client.workspaces.delete(self.resource_group, self.name)
|
||||
except CloudError as exc:
|
||||
self.fail('Error when deleting workspace {0} - {1}'.format(self.name, exc.message or str(exc)))
|
||||
|
||||
def to_dict(self, workspace):
|
||||
result = workspace.as_dict()
|
||||
result['sku'] = _camel_to_snake(workspace.sku.name)
|
||||
return result
|
||||
|
||||
def list_intelligence_packs(self):
|
||||
try:
|
||||
response = self.log_analytics_client.workspaces.list_intelligence_packs(self.resource_group, self.name)
|
||||
return [x.as_dict() for x in response]
|
||||
except CloudError as exc:
|
||||
self.fail('Error when listing intelligence packs {0}'.format(exc.message or str(exc)))
|
||||
|
||||
def change_intelligence(self, key, value):
|
||||
try:
|
||||
if value:
|
||||
self.log_analytics_client.workspaces.enable_intelligence_pack(self.resource_group, self.name, key)
|
||||
else:
|
||||
self.log_analytics_client.workspaces.disable_intelligence_pack(self.resource_group, self.name, key)
|
||||
except CloudError as exc:
|
||||
self.fail('Error when changing intelligence pack {0} - {1}'.format(key, exc.message or str(exc)))
|
||||
|
||||
def list_management_groups(self):
|
||||
result = []
|
||||
try:
|
||||
response = self.log_analytics_client.workspaces.list_management_groups(self.resource_group, self.name)
|
||||
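# The SDK returns a paged iterator; it is drained manually with next() and the loop
# ends when StopIteration is raised after the last page.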
while True:
|
||||
result.append(response.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except CloudError as exc:
|
||||
self.fail('Error when listing management groups {0}'.format(exc.message or str(exc)))
|
||||
return result
|
||||
|
||||
def list_usages(self):
|
||||
result = []
|
||||
try:
|
||||
response = self.log_analytics_client.workspaces.list_usages(self.resource_group, self.name)
|
||||
while True:
|
||||
result.append(response.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except CloudError as exc:
|
||||
self.fail('Error when listing usages {0}'.format(exc.message or str(exc)))
|
||||
return result
|
||||
|
||||
def get_shared_keys(self):
|
||||
try:
|
||||
return self.log_analytics_client.workspaces.get_shared_keys(self.resource_group, self.name).as_dict()
|
||||
except CloudError as exc:
|
||||
self.fail('Error when getting shared key {0}'.format(exc.message or str(exc)))
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMLogAnalyticsWorkspace()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,269 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_loganalyticsworkspace_info
|
||||
version_added: "2.9"
|
||||
short_description: Get facts of Azure Log Analytics workspaces
|
||||
description:
|
||||
- Get facts for Azure Log Analytics workspaces.
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of resource group.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- Name of the workspace.
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
show_intelligence_packs:
|
||||
description:
|
||||
- Show the intelligence packs for a workspace.
|
||||
- Note this requires an extra API call for each workspace, so expect a slower response.
|
||||
show_management_groups:
|
||||
description:
|
||||
- Show the management groups for a workspace.
|
||||
- Note this requires an extra API call for each workspace, so expect a slower response.
|
||||
show_shared_keys:
|
||||
description:
|
||||
- Show the shared keys for a workspace.
|
||||
- Note this requires an extra API call for each workspace, so expect a slower response.
|
||||
show_usages:
|
||||
description:
|
||||
- Show the list of usages for a workspace.
|
||||
- Note this requires an extra API call for each workspace, so expect a slower response.
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Yuwei Zhou (@yuwzho)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Query a workspace
|
||||
azure_rm_loganalyticsworkspace_info:
|
||||
resource_group: myResourceGroup
|
||||
name: myLogAnalyticsWorkspace
|
||||
show_intelligence_packs: true
|
||||
show_management_groups: true
|
||||
show_shared_keys: true
|
||||
show_usages: true
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Workspace resource path.
|
||||
type: str
|
||||
returned: success
|
||||
example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.OperationalInsights/workspaces/m
|
||||
yLogAnalyticsWorkspace"
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
type: str
|
||||
returned: success
|
||||
example: "eastus"
|
||||
sku:
|
||||
description:
|
||||
- The SKU of the workspace.
|
||||
type: str
|
||||
returned: success
|
||||
example: "per_gb2018"
|
||||
retention_in_days:
|
||||
description:
|
||||
- The workspace data retention in days.
|
||||
- -1 means Unlimited retention for I(sku=unlimited).
|
||||
- 730 days is the maximum allowed for all other SKUs.
|
||||
type: int
|
||||
returned: success
|
||||
example: 40
|
||||
intelligence_packs:
|
||||
description:
|
||||
- Lists all the intelligence packs possible and whether they are enabled or disabled for a given workspace.
|
||||
type: list
|
||||
returned: success
|
||||
example: [ {'name': 'CapacityPerformance', 'enabled': true} ]
|
||||
management_groups:
|
||||
description:
|
||||
- Management groups connected to the workspace.
|
||||
type: dict
|
||||
returned: success
|
||||
example: {'value': []}
|
||||
shared_keys:
|
||||
description:
|
||||
- Shared keys for the workspace.
|
||||
type: dict
|
||||
returned: success
|
||||
example: {
|
||||
'primarySharedKey': 'BozLY1JnZbxu0jWUQSY8iRPEM8ObmpP8rW+8bUl3+HpDJI+n689SxXgTgU7k1qdxo/WugRLxechxbolAfHM5uA==',
|
||||
'secondarySharedKey': '7tDt5W0JBrCQKtQA3igfFltLSzJeyr9LmuT+B/ibzd8cdC1neZ1ePOQLBx5NUzc0q2VUIK0cLhWNyFvo/hT8Ww=='
|
||||
}
|
||||
usages:
|
||||
description:
|
||||
- Usage metrics for the workspace.
|
||||
type: dict
|
||||
returned: success
|
||||
example: {
|
||||
'value': [
|
||||
{
|
||||
'name': {
|
||||
'value': 'DataAnalyzed',
|
||||
'localizedValue': 'Data Analyzed'
|
||||
},
|
||||
'unit': 'Bytes',
|
||||
'currentValue': 0,
|
||||
'limit': 524288000,
|
||||
'nextResetTime': '2017-10-03T00:00:00Z',
|
||||
'quotaPeriod': 'P1D'
|
||||
}
|
||||
]
|
||||
}
|
||||
''' # NOQA
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
|
||||
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMLogAnalyticsWorkspaceInfo(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(type='str', required=True),
|
||||
name=dict(type='str'),
|
||||
tags=dict(type='list'),
|
||||
show_shared_keys=dict(type='bool'),
|
||||
show_intelligence_packs=dict(type='bool'),
|
||||
show_usages=dict(type='bool'),
|
||||
show_management_groups=dict(type='bool')
|
||||
)
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
workspaces=[]
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
self.show_intelligence_packs = None
|
||||
self.show_shared_keys = None
|
||||
self.show_usages = None
|
||||
self.show_management_groups = None
|
||||
|
||||
super(AzureRMLogAnalyticsWorkspaceInfo, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
is_old_facts = self.module._name == 'azure_rm_loganalyticsworkspace_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_loganalyticsworkspace_facts' module has been renamed to 'azure_rm_loganalyticsworkspace_info'",
|
||||
version='2.13')
|
||||
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if self.name:
|
||||
item = self.get_workspace()
|
||||
response = [item] if item else []
|
||||
else:
|
||||
response = self.list_by_resource_group()
|
||||
|
||||
self.results['workspaces'] = [self.to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
|
||||
return self.results
|
||||
|
||||
def get_workspace(self):
|
||||
try:
|
||||
return self.log_analytics_client.workspaces.get(self.resource_group, self.name)
|
||||
except CloudError:
|
||||
pass
|
||||
return None
|
||||
|
||||
def list_by_resource_group(self):
|
||||
try:
|
||||
return self.log_analytics_client.workspaces.list_by_resource_group(self.resource_group)
|
||||
except CloudError:
|
||||
pass
|
||||
return []
|
||||
|
||||
def list_intelligence_packs(self):
|
||||
try:
|
||||
response = self.log_analytics_client.workspaces.list_intelligence_packs(self.resource_group, self.name)
|
||||
return [x.as_dict() for x in response]
|
||||
except CloudError as exc:
|
||||
self.fail('Error when listing intelligence packs {0}'.format(exc.message or str(exc)))
|
||||
|
||||
def list_management_groups(self):
|
||||
result = []
|
||||
try:
|
||||
response = self.log_analytics_client.workspaces.list_management_groups(self.resource_group, self.name)
|
||||
while True:
|
||||
result.append(response.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except CloudError as exc:
|
||||
self.fail('Error when listing management groups {0}'.format(exc.message or str(exc)))
|
||||
return result
|
||||
|
||||
def list_usages(self):
|
||||
result = []
|
||||
try:
|
||||
response = self.log_analytics_client.workspaces.list_usages(self.resource_group, self.name)
|
||||
while True:
|
||||
result.append(response.next().as_dict())
|
||||
except StopIteration:
|
||||
pass
|
||||
except CloudError as exc:
|
||||
self.fail('Error when listing usages {0}'.format(exc.message or str(exc)))
|
||||
return result
|
||||
|
||||
def get_shared_keys(self):
|
||||
try:
|
||||
return self.log_analytics_client.workspaces.get_shared_keys(self.resource_group, self.name).as_dict()
|
||||
except CloudError as exc:
|
||||
self.fail('Error when getting shared key {0}'.format(exc.message or str(exc)))
|
||||
|
||||
def to_dict(self, workspace):
|
||||
result = workspace.as_dict()
|
||||
result['sku'] = _camel_to_snake(workspace.sku.name)
|
||||
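# Each show_* flag costs one extra API call per workspace, which is why the option
# documentation warns that enabling them slows the response.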
if self.show_intelligence_packs:
|
||||
result['intelligence_packs'] = self.list_intelligence_packs()
|
||||
if self.show_management_groups:
|
||||
result['management_groups'] = self.list_management_groups()
|
||||
if self.show_shared_keys:
|
||||
result['shared_keys'] = self.get_shared_keys()
|
||||
if self.show_usages:
|
||||
result['usages'] = self.list_usages()
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMLogAnalyticsWorkspaceInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,493 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Bruno Medina Bolanos Cacho <bruno.medina@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_manageddisk
|
||||
|
||||
version_added: "2.4"
|
||||
|
||||
short_description: Manage Azure Managed Disks
|
||||
|
||||
description:
|
||||
- Create, update and delete an Azure Managed Disk.
|
||||
|
||||
notes:
|
||||
- This module was called M(azure_rm_managed_disk) before Ansible 2.8. The usage did not change.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- Name of a resource group where the managed disk exists or will be created.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the managed disk.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the managed disk. Use C(present) to create or update a managed disk and C(absent) to delete a managed disk.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
location:
|
||||
description:
|
||||
- Valid Azure location. Defaults to location of the resource group.
|
||||
storage_account_type:
|
||||
description:
|
||||
- Type of storage for the managed disk.
|
||||
- If not specified, the disk is created as C(Standard_LRS).
|
||||
- C(Standard_LRS) is for Standard HDD.
|
||||
- C(StandardSSD_LRS) (added in 2.8) is for Standard SSD.
|
||||
- C(Premium_LRS) is for Premium SSD.
|
||||
- C(UltraSSD_LRS) (added in 2.8) is for Ultra SSD, which is in preview mode, and only available on select instance types.
|
||||
- See U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types) for more information about disk types.
|
||||
choices:
|
||||
- Standard_LRS
|
||||
- StandardSSD_LRS
|
||||
- Premium_LRS
|
||||
- UltraSSD_LRS
|
||||
create_option:
|
||||
description:
|
||||
- C(empty) creates an empty disk of I(disk_size_gb), C(import) imports from the VHD file given in I(source_uri), and C(copy) copies from the existing managed disk whose resource ID is given in I(source_uri).
|
||||
choices:
|
||||
- empty
|
||||
- import
|
||||
- copy
|
||||
source_uri:
|
||||
description:
|
||||
- URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
|
||||
aliases:
|
||||
- source_resource_uri
|
||||
os_type:
|
||||
description:
|
||||
- Type of Operating System.
|
||||
- Used when I(create_option=copy) or I(create_option=import) and the source is an OS disk.
|
||||
- If omitted during creation, no value is set.
|
||||
- If omitted during an update, no change is made.
|
||||
- Once set, this value cannot be cleared.
|
||||
choices:
|
||||
- linux
|
||||
- windows
|
||||
disk_size_gb:
|
||||
description:
|
||||
- Size in GB of the managed disk to be created.
|
||||
- If I(create_option=copy) then the value must be greater than or equal to the source's size.
|
||||
managed_by:
|
||||
description:
|
||||
- Name of an existing virtual machine with which the disk is or will be associated; the VM must be in the same resource group.
|
||||
- To detach a disk from a VM, explicitly set this to ''.
|
||||
- If this option is unset, the value will not be changed.
|
||||
version_added: '2.5'
|
||||
attach_caching:
|
||||
description:
|
||||
- Disk caching policy to apply when the disk is attached to the VM defined by C(managed_by).
|
||||
- If this option differs from the current caching policy, the managed disk is detached and re-attached with the requested caching option.
|
||||
choices:
|
||||
- ''
|
||||
- read_only
|
||||
- read_write
|
||||
version_added: '2.8'
|
||||
tags:
|
||||
description:
|
||||
- Tags to assign to the managed disk.
|
||||
- Format tags as 'key' or 'key:value'.
|
||||
zone:
|
||||
description:
|
||||
- The Azure managed disk's zone.
|
||||
- Allowed values are C(1), C(2), C(3) and C('').
|
||||
choices:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- ''
|
||||
version_added: "2.8"
|
||||
lun:
|
||||
description:
|
||||
- The logical unit number (LUN) for the data disk.
|
||||
- This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
|
||||
type: int
|
||||
version_added: '2.10'
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
author:
|
||||
- Bruno Medina (@brusMX)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create managed disk
|
||||
azure_rm_manageddisk:
|
||||
name: mymanageddisk
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
disk_size_gb: 4
|
||||
|
||||
- name: Create managed operating system disk from page blob
|
||||
azure_rm_manageddisk:
|
||||
name: mymanageddisk
|
||||
location: eastus2
|
||||
resource_group: myResourceGroup
|
||||
create_option: import
|
||||
source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd
|
||||
os_type: windows
|
||||
storage_account_type: Premium_LRS
|
||||
|
||||
- name: Mount the managed disk to a VM
|
||||
azure_rm_manageddisk:
|
||||
name: mymanageddisk
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
disk_size_gb: 4
|
||||
managed_by: testvm001
|
||||
attach_caching: read_only
|
||||
|
||||
- name: Unmount the managed disk from the VM
|
||||
azure_rm_manageddisk:
|
||||
name: mymanageddisk
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
disk_size_gb: 4
|
||||
|
||||
- name: Delete managed disk
|
||||
azure_rm_manageddisk:
|
||||
name: mymanageddisk
|
||||
location: eastus
|
||||
resource_group: myResourceGroup
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- The managed disk resource ID.
|
||||
returned: always
|
||||
type: dict
|
||||
state:
|
||||
description:
|
||||
- Current state of the managed disk.
|
||||
returned: always
|
||||
type: dict
|
||||
changed:
|
||||
description:
|
||||
- Whether or not the resource has changed.
|
||||
returned: always
|
||||
type: bool
|
||||
'''
|
||||
|
||||
import re
|
||||
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
try:
|
||||
from msrestazure.tools import parse_resource_id
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
# duplicated in azure_rm_manageddisk_facts
|
||||
def managed_disk_to_dict(managed_disk):
|
||||
create_data = managed_disk.creation_data
|
||||
return dict(
|
||||
id=managed_disk.id,
|
||||
name=managed_disk.name,
|
||||
location=managed_disk.location,
|
||||
tags=managed_disk.tags,
|
||||
create_option=create_data.create_option.lower(),
|
||||
source_uri=create_data.source_uri or create_data.source_resource_id,
|
||||
disk_size_gb=managed_disk.disk_size_gb,
|
||||
os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
|
||||
storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
|
||||
managed_by=managed_disk.managed_by,
|
||||
zone=managed_disk.zones[0] if managed_disk.zones and len(managed_disk.zones) > 0 else ''
|
||||
)
|
||||
|
||||
|
||||
class AzureRMManagedDisk(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Managed Disk resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
storage_account_type=dict(
|
||||
type='str',
|
||||
choices=['Standard_LRS', 'StandardSSD_LRS', 'Premium_LRS', 'UltraSSD_LRS']
|
||||
),
|
||||
create_option=dict(
|
||||
type='str',
|
||||
choices=['empty', 'import', 'copy']
|
||||
),
|
||||
source_uri=dict(
|
||||
type='str',
|
||||
aliases=['source_resource_uri']
|
||||
),
|
||||
os_type=dict(
|
||||
type='str',
|
||||
choices=['linux', 'windows']
|
||||
),
|
||||
disk_size_gb=dict(
|
||||
type='int'
|
||||
),
|
||||
managed_by=dict(
|
||||
type='str'
|
||||
),
|
||||
zone=dict(
|
||||
type='str',
|
||||
choices=['', '1', '2', '3']
|
||||
),
|
||||
attach_caching=dict(
|
||||
type='str',
|
||||
choices=['', 'read_only', 'read_write']
|
||||
),
|
||||
lun=dict(
|
||||
type='int'
|
||||
)
|
||||
)
|
||||
required_if = [
|
||||
('create_option', 'import', ['source_uri']),
|
||||
('create_option', 'copy', ['source_uri']),
|
||||
('create_option', 'empty', ['disk_size_gb'])
|
||||
]
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
state=dict())
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.location = None
|
||||
self.storage_account_type = None
|
||||
self.create_option = None
|
||||
self.source_uri = None
|
||||
self.os_type = None
|
||||
self.disk_size_gb = None
|
||||
self.tags = None
|
||||
self.zone = None
|
||||
self.managed_by = None
|
||||
self.attach_caching = None
|
||||
self.lun = None
|
||||
super(AzureRMManagedDisk, self).__init__(
|
||||
derived_arg_spec=self.module_arg_spec,
|
||||
required_if=required_if,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
result = None
|
||||
changed = False
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
if not self.location:
|
||||
self.location = resource_group.location
|
||||
|
||||
disk_instance = self.get_managed_disk()
|
||||
result = disk_instance
|
||||
|
||||
# need create or update
|
||||
if self.state == 'present':
|
||||
parameter = self.generate_managed_disk_property()
|
||||
if not disk_instance or self.is_different(disk_instance, parameter):
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
result = self.create_or_update_managed_disk(parameter)
|
||||
else:
|
||||
result = True
|
||||
|
||||
# unmount from the old virtual machine and mount to the new virtual machine
|
||||
if self.managed_by or self.managed_by == '':
|
||||
vm_name = parse_resource_id(disk_instance.get('managed_by', '')).get('name') if disk_instance else None
|
||||
vm_name = vm_name or ''
|
||||
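# vm_name is the VM currently holding the disk ('' when unattached); a change of owner
# or of the requested caching policy forces a detach followed by a fresh attach.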
if self.managed_by != vm_name or self.is_attach_caching_option_different(vm_name, result):
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
if vm_name:
|
||||
self.detach(vm_name, result)
|
||||
if self.managed_by:
|
||||
self.attach(self.managed_by, result)
|
||||
result = self.get_managed_disk()
|
||||
|
||||
if self.state == 'absent' and disk_instance:
|
||||
changed = True
|
||||
if not self.check_mode:
|
||||
self.delete_managed_disk()
|
||||
result = True
|
||||
|
||||
self.results['changed'] = changed
|
||||
self.results['state'] = result
|
||||
return self.results
|
||||
|
||||
def attach(self, vm_name, disk):
|
||||
vm = self._get_vm(vm_name)
|
||||
# find the lun
|
||||
if self.lun:
|
||||
lun = self.lun
|
||||
else:
|
||||
luns = ([d.lun for d in vm.storage_profile.data_disks]
|
||||
if vm.storage_profile.data_disks else [])
|
||||
lun = max(luns) + 1 if luns else 0
|
||||
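# Without an explicit lun the next slot is max(existing) + 1 (0 for the first data
# disk); gaps left by previously detached disks are not reused.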
|
||||
# prepare the data disk
|
||||
params = self.compute_models.ManagedDiskParameters(id=disk.get('id'), storage_account_type=disk.get('storage_account_type'))
|
||||
caching_options = self.compute_models.CachingTypes[self.attach_caching] if self.attach_caching and self.attach_caching != '' else None
|
||||
data_disk = self.compute_models.DataDisk(lun=lun,
|
||||
create_option=self.compute_models.DiskCreateOptionTypes.attach,
|
||||
managed_disk=params,
|
||||
caching=caching_options)
|
||||
vm.storage_profile.data_disks.append(data_disk)
|
||||
self._update_vm(vm_name, vm)
|
||||
|
||||
def detach(self, vm_name, disk):
|
||||
vm = self._get_vm(vm_name)
|
||||
leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk.get('name').lower()]
|
||||
if len(vm.storage_profile.data_disks) == len(leftovers):
|
||||
self.fail("No disk with the name '{0}' was found".format(disk.get('name')))
|
||||
vm.storage_profile.data_disks = leftovers
|
||||
self._update_vm(vm_name, vm)
|
||||
|
||||
def _update_vm(self, name, params):
|
||||
try:
|
||||
poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, name, params)
|
||||
self.get_poller_result(poller)
|
||||
except Exception as exc:
|
||||
self.fail("Error updating virtual machine {0} - {1}".format(name, str(exc)))
|
||||
|
||||
def _get_vm(self, name):
|
||||
try:
|
||||
return self.compute_client.virtual_machines.get(self.resource_group, name, expand='instanceview')
|
||||
except Exception as exc:
|
||||
self.fail("Error getting virtual machine {0} - {1}".format(name, str(exc)))
|
||||
|
||||
def generate_managed_disk_property(self):
|
||||
# TODO: Add support for EncryptionSettings, DiskIOPSReadWrite, DiskMBpsReadWrite
|
||||
disk_params = {}
|
||||
creation_data = {}
|
||||
disk_params['location'] = self.location
|
||||
disk_params['tags'] = self.tags
|
||||
if self.zone:
|
||||
disk_params['zones'] = [self.zone]
|
||||
if self.storage_account_type:
|
||||
storage_account_type = self.compute_models.DiskSku(name=self.storage_account_type)
|
||||
disk_params['sku'] = storage_account_type
|
||||
disk_params['disk_size_gb'] = self.disk_size_gb
|
||||
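# Default to an empty disk. 'import' maps to the SDK's import_enum member (import is a
# Python keyword) and takes a VHD source_uri; 'copy' takes the source disk's resource ID.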
creation_data['create_option'] = self.compute_models.DiskCreateOption.empty
|
||||
if self.create_option == 'import':
|
||||
creation_data['create_option'] = self.compute_models.DiskCreateOption.import_enum
|
||||
creation_data['source_uri'] = self.source_uri
|
||||
elif self.create_option == 'copy':
|
||||
creation_data['create_option'] = self.compute_models.DiskCreateOption.copy
|
||||
creation_data['source_resource_id'] = self.source_uri
|
||||
if self.os_type:
|
||||
typecon = {
|
||||
'linux': self.compute_models.OperatingSystemTypes.linux,
|
||||
'windows': self.compute_models.OperatingSystemTypes.windows
|
||||
}
|
||||
disk_params['os_type'] = typecon[self.os_type]
|
||||
else:
|
||||
disk_params['os_type'] = None
|
||||
disk_params['creation_data'] = creation_data
|
||||
return disk_params
|
||||
|
||||
def create_or_update_managed_disk(self, parameter):
|
||||
try:
|
||||
poller = self.compute_client.disks.create_or_update(
|
||||
self.resource_group,
|
||||
self.name,
|
||||
parameter)
|
||||
aux = self.get_poller_result(poller)
|
||||
return managed_disk_to_dict(aux)
|
||||
except CloudError as e:
|
||||
self.fail("Error creating the managed disk: {0}".format(str(e)))
|
||||
|
||||
# This method accounts for the difference in structure between the
|
||||
# Azure retrieved disk and the parameters for the new disk to be created.
|
||||
def is_different(self, found_disk, new_disk):
|
||||
resp = False
|
||||
if new_disk.get('disk_size_gb'):
|
||||
if not found_disk['disk_size_gb'] == new_disk['disk_size_gb']:
|
||||
resp = True
|
||||
if new_disk.get('os_type'):
|
||||
if not found_disk['os_type'] == new_disk['os_type']:
|
||||
resp = True
|
||||
if new_disk.get('sku'):
|
||||
if not found_disk['storage_account_type'] == new_disk['sku'].name:
|
||||
resp = True
|
||||
# TODO: revisit tag handling; tag dicts are currently compared for exact equality, so any difference triggers an update
|
||||
if new_disk.get('tags') is not None:
|
||||
if not found_disk['tags'] == new_disk['tags']:
|
||||
resp = True
|
||||
if self.zone is not None:
|
||||
if not found_disk['zone'] == self.zone:
|
||||
resp = True
|
||||
return resp
|
||||
|
||||
def delete_managed_disk(self):
|
||||
try:
|
||||
poller = self.compute_client.disks.delete(
|
||||
self.resource_group,
|
||||
self.name)
|
||||
return self.get_poller_result(poller)
|
||||
except CloudError as e:
|
||||
self.fail("Error deleting the managed disk: {0}".format(str(e)))
|
||||
|
||||
def get_managed_disk(self):
|
||||
try:
|
||||
resp = self.compute_client.disks.get(
|
||||
self.resource_group,
|
||||
self.name)
|
||||
return managed_disk_to_dict(resp)
|
||||
except CloudError as e:
|
||||
self.log('Did not find managed disk')
|
||||
|
||||
def is_attach_caching_option_different(self, vm_name, disk):
|
||||
resp = False
|
||||
if vm_name:
|
||||
vm = self._get_vm(vm_name)
|
||||
correspondence = next((d for d in vm.storage_profile.data_disks if d.name.lower() == disk.get('name').lower()), None)
|
||||
if correspondence and correspondence.caching.name != self.attach_caching:
|
||||
resp = True
|
||||
if correspondence.caching.name == 'none' and self.attach_caching == '':
|
||||
resp = False
|
||||
return resp
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMManagedDisk()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,243 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2016, Bruno Medina Bolanos Cacho <bruno.medina@microsoft.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: azure_rm_manageddisk_info
|
||||
|
||||
version_added: "2.9"
|
||||
|
||||
short_description: Get managed disk facts
|
||||
|
||||
description:
|
||||
- Get facts for a specific managed disk or all managed disks.
|
||||
|
||||
notes:
|
||||
- This module was called M(azure_rm_managed_disk_facts) before Ansible 2.8. The usage did not change.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Limit results to a specific managed disk.
|
||||
type: str
|
||||
resource_group:
|
||||
description:
|
||||
- Limit results to a specific resource group.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags.
|
||||
- Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Bruno Medina (@brusMX)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Get facts for one managed disk
|
||||
azure_rm_manageddisk_info:
|
||||
name: Testing
|
||||
resource_group: myResourceGroup
|
||||
|
||||
- name: Get facts for all managed disks
|
||||
azure_rm_manageddisk_info:
|
||||
|
||||
- name: Get facts by tags
|
||||
azure_rm_manageddisk_info:
|
||||
tags:
|
||||
- testing
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
azure_managed_disk:
|
||||
description:
|
||||
- List of managed disk dicts.
|
||||
returned: always
|
||||
type: list
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource id.
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the managed disk.
|
||||
type: str
|
||||
location:
|
||||
description:
|
||||
- Valid Azure location.
|
||||
type: str
|
||||
storage_account_type:
|
||||
description:
|
||||
- Type of storage for the managed disk.
|
||||
- See U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types) for more information about this type.
|
||||
type: str
|
||||
sample: Standard_LRS
|
||||
create_option:
|
||||
description:
|
||||
- Create option of the disk.
|
||||
type: str
|
||||
sample: copy
|
||||
source_uri:
|
||||
description:
|
||||
- URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
|
||||
type: str
|
||||
os_type:
|
||||
description:
|
||||
- Type of Operating System.
|
||||
choices:
|
||||
- linux
|
||||
- windows
|
||||
type: str
|
||||
disk_size_gb:
|
||||
description:
|
||||
- Size in GB of the managed disk to be created.
|
||||
type: str
|
||||
managed_by:
|
||||
description:
|
||||
- Name of an existing virtual machine with which the disk is or will be associated; the VM must be in the same resource group.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Tags to assign to the managed disk.
|
||||
type: dict
|
||||
sample: { "tag": "value" }
|
||||
'''

from ansible.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from msrestazure.azure_exceptions import CloudError
except Exception:
    # handled in azure_rm_common
    pass


# duplicated in azure_rm_manageddisk
def managed_disk_to_dict(managed_disk):
    create_data = managed_disk.creation_data
    return dict(
        id=managed_disk.id,
        name=managed_disk.name,
        location=managed_disk.location,
        tags=managed_disk.tags,
        create_option=create_data.create_option.lower(),
        source_uri=create_data.source_uri or create_data.source_resource_id,
        disk_size_gb=managed_disk.disk_size_gb,
        os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
        storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
        managed_by=managed_disk.managed_by,
        zone=managed_disk.zones[0] if managed_disk.zones and len(managed_disk.zones) > 0 else ''
    )


class AzureRMManagedDiskInfo(AzureRMModuleBase):
    """Utility class to get managed disk facts"""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str'
            ),
            name=dict(
                type='str'
            ),
            tags=dict(
                # documented as a list of 'key' or 'key:value' strings
                type='list'
            ),
        )
        self.results = dict(
            ansible_info=dict(
                azure_managed_disk=[]
            )
        )
        self.resource_group = None
        self.name = None
        self.create_option = None
        self.source_uri = None
        self.source_resource_uri = None
        self.tags = None
        super(AzureRMManagedDiskInfo, self).__init__(
            derived_arg_spec=self.module_arg_spec,
            supports_check_mode=True,
            supports_tags=True)

    def exec_module(self, **kwargs):
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        self.results['ansible_info']['azure_managed_disk'] = (
            self.get_item() if self.name
            else (self.list_items_by_resource_group() if self.resource_group else self.list_items())
        )

        return self.results

    def get_item(self):
        """Get a single managed disk"""
        item = None
        result = []

        try:
            item = self.compute_client.disks.get(
                self.resource_group,
                self.name)
        except CloudError:
            pass

        if item and self.has_tags(item.tags, self.tags):
            result = [managed_disk_to_dict(item)]

        return result

    def list_items(self):
        """Get all managed disks"""
        try:
            response = self.compute_client.disks.list()
        except CloudError as exc:
            self.fail('Failed to list all items - {0}'.format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(managed_disk_to_dict(item))
        return results

    def list_items_by_resource_group(self):
        """Get managed disks in a resource group"""
        try:
            response = self.compute_client.disks.list_by_resource_group(resource_group_name=self.resource_group)
        except CloudError as exc:
            self.fail('Failed to list items by resource group - {0}'.format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(managed_disk_to_dict(item))
        return results


def main():
    """Main module execution code path"""

    AzureRMManagedDiskInfo()


if __name__ == '__main__':
    main()
@ -1,241 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mariadbconfiguration
|
||||
version_added: "2.8"
|
||||
short_description: Manage Configuration instance
|
||||
description:
|
||||
- Create, update and delete instance of Configuration.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource.
|
||||
required: True
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the server configuration.
|
||||
required: True
|
||||
value:
|
||||
description:
|
||||
- Value of the configuration.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the MariaDB configuration. Use C(present) to update setting, or C(absent) to reset to default value.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Matti Ranta (@techknowlogick)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
- name: Update MariaDB Server setting
  azure_rm_mariadbconfiguration:
    resource_group: myResourceGroup
    server_name: myServer
    name: event_scheduler
    value: "ON"
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/confi
|
||||
gurations/event_scheduler"
|
||||
'''
|
||||
|
||||
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from msrestazure.azure_exceptions import CloudError
    from msrest.polling import LROPoller
    # MariaDBManagementClient lives in the mariadb sub-package, not mysql
    from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMMariaDbConfiguration(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
value=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
self.value = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
old_response = self.get_configuration()
|
||||
|
||||
if not old_response:
|
||||
self.log("Configuration instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Configuration instance already exists")
|
||||
if self.state == 'absent' and old_response['source'] == 'user-override':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.log("Need to check if Configuration instance has to be deleted or may be updated")
|
||||
if self.value != old_response.get('value'):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Configuration instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_configuration()
|
||||
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Configuration instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_configuration()
|
||||
else:
|
||||
self.log("Configuration instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_configuration(self):
|
||||
self.log("Creating / Updating the Configuration instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
configuration_name=self.name,
|
||||
value=self.value,
|
||||
source='user-override')
|
||||
if isinstance(response, LROPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Configuration instance.')
|
||||
self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_configuration(self):
|
||||
self.log("Deleting the Configuration instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
configuration_name=self.name,
|
||||
source='system-default')
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Configuration instance.')
|
||||
self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_configuration(self):
|
||||
self.log("Checking if the Configuration instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
configuration_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Configuration instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Configuration instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMMariaDbConfiguration()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,216 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mariadbconfiguration_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure MariaDB Configuration facts
|
||||
description:
|
||||
- Get facts of Azure MariaDB Configuration.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
type: str
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Setting name.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Matti Ranta (@techknowlogick)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get specific setting of MariaDB Server
|
||||
azure_rm_mariadbconfiguration_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: testserver
|
||||
name: deadlock_timeout
|
||||
|
||||
- name: Get all settings of MariaDB Server
|
||||
azure_rm_mariadbconfiguration_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: server_name
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
settings:
|
||||
description:
|
||||
- A list of dictionaries containing MariaDB Server settings.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Setting resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver
|
||||
/configurations/deadlock_timeout"
|
||||
name:
|
||||
description:
|
||||
- Setting name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: deadlock_timeout
|
||||
value:
|
||||
description:
|
||||
- Setting value.
|
||||
returned: always
|
||||
type: raw
|
||||
sample: 1000
|
||||
description:
|
||||
description:
|
||||
- Description of the configuration.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Deadlock timeout.
|
||||
source:
|
||||
description:
|
||||
- Source of the configuration.
|
||||
returned: always
|
||||
type: str
|
||||
sample: system-default
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMMariaDbConfigurationInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
super(AzureRMMariaDbConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_mariadbconfiguration_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_mariadbconfiguration_facts' module has been renamed to 'azure_rm_mariadbconfiguration_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name is not None:
|
||||
self.results['settings'] = self.get()
|
||||
else:
|
||||
self.results['settings'] = self.list_by_server()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
'''
|
||||
Gets facts of the specified MariaDB Configuration.
|
||||
|
||||
:return: deserialized MariaDB Configurationinstance state dictionary
|
||||
'''
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
configuration_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Configurations.')
|
||||
|
||||
if response is not None:
|
||||
results.append(self.format_item(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_server(self):
|
||||
'''
|
||||
Gets facts of the specified MariaDB Configuration.
|
||||
|
||||
:return: deserialized MariaDB Configurationinstance state dictionary
|
||||
'''
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Configurations.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_item(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_item(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'server_name': self.server_name,
|
||||
'id': d['id'],
|
||||
'name': d['name'],
|
||||
'value': d['value'],
|
||||
'description': d['description'],
|
||||
'source': d['source']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMMariaDbConfigurationInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,304 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mariadbdatabase
|
||||
version_added: "2.8"
|
||||
short_description: Manage MariaDB Database instance
|
||||
description:
|
||||
- Create, update and delete instance of MariaDB Database.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the database.
|
||||
required: True
|
||||
charset:
|
||||
description:
|
||||
- The charset of the database. Check MariaDB documentation for possible values.
|
||||
- This is only set on creation, use I(force_update) to recreate a database if the values don't match.
|
||||
collation:
|
||||
description:
|
||||
- The collation of the database. Check MariaDB documentation for possible values.
|
||||
- This is only set on creation, use I(force_update) to recreate a database if the values don't match.
|
||||
force_update:
|
||||
description:
|
||||
- When set to C(true), will delete and recreate the existing MariaDB database if any of the properties don't match what is set.
|
||||
- When set to C(false), no change will occur to the database even if any of the properties do not match.
|
||||
type: bool
|
||||
default: 'no'
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the MariaDB Database. Use C(present) to create or update a database and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Matti Ranta (@techknowlogick)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
- name: Create (or update) MariaDB Database
  azure_rm_mariadbdatabase:
    resource_group: myResourceGroup
    server_name: testserver
    name: db1
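
# The following example is an illustrative sketch, not part of the original module documentation.
# It sets an explicit charset and collation, which the options note are only applied at creation
# time (use force_update to recreate a database whose values differ); the names are hypothetical.
- name: Create MariaDB Database with an explicit charset and collation
  azure_rm_mariadbdatabase:
    resource_group: myResourceGroup
    server_name: testserver
    name: db2
    charset: utf8
    collation: utf8_general_ci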
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/databases/db1
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: db1
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMMariaDbDatabase(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM MariaDB Database resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
charset=dict(
|
||||
type='str'
|
||||
),
|
||||
collation=dict(
|
||||
type='str'
|
||||
),
|
||||
force_update=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
self.force_update = None
|
||||
self.parameters = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMMariaDbDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
if key == "charset":
|
||||
self.parameters["charset"] = kwargs[key]
|
||||
elif key == "collation":
|
||||
self.parameters["collation"] = kwargs[key]
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
old_response = self.get_mariadbdatabase()
|
||||
|
||||
if not old_response:
|
||||
self.log("MariaDB Database instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("MariaDB Database instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.log("Need to check if MariaDB Database instance has to be deleted or may be updated")
|
||||
if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
|
||||
self.to_do = Actions.Update
|
||||
if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
|
||||
self.to_do = Actions.Update
|
||||
if self.to_do == Actions.Update:
|
||||
if self.force_update:
|
||||
if not self.check_mode:
|
||||
self.delete_mariadbdatabase()
|
||||
else:
|
||||
self.fail("Database properties cannot be updated without setting 'force_update' option")
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the MariaDB Database instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_mariadbdatabase()
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("MariaDB Database instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_mariadbdatabase()
|
||||
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
|
||||
# for some time after deletion -- this should be really fixed in Azure
|
||||
while self.get_mariadbdatabase():
|
||||
time.sleep(20)
|
||||
else:
|
||||
self.log("MariaDB Database instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
self.results["name"] = response["name"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_mariadbdatabase(self):
|
||||
'''
|
||||
Creates or updates MariaDB Database with the specified configuration.
|
||||
|
||||
:return: deserialized MariaDB Database instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the MariaDB Database instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
database_name=self.name,
|
||||
parameters=self.parameters)
|
||||
if isinstance(response, LROPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the MariaDB Database instance.')
|
||||
self.fail("Error creating the MariaDB Database instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_mariadbdatabase(self):
|
||||
'''
|
||||
Deletes specified MariaDB Database instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the MariaDB Database instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
database_name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the MariaDB Database instance.')
|
||||
self.fail("Error deleting the MariaDB Database instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_mariadbdatabase(self):
|
||||
'''
|
||||
Gets the properties of the specified MariaDB Database.
|
||||
|
||||
:return: deserialized MariaDB Database instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the MariaDB Database instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
database_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("MariaDB Database instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the MariaDB Database instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMMariaDbDatabase()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,211 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mariadbdatabase_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure MariaDB Database facts
|
||||
description:
|
||||
- Get facts of MariaDB Database.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
type: str
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the database.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Matti Ranta (@techknowlogick)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of MariaDB Database
|
||||
azure_rm_mariadbdatabase_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: server_name
|
||||
name: database_name
|
||||
|
||||
- name: List instances of MariaDB Database
|
||||
azure_rm_mariadbdatabase_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: server_name
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
databases:
|
||||
description:
|
||||
- A list of dictionaries containing facts for MariaDB Databases.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
|
||||
ver/databases/db1"
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testrg
|
||||
server_name:
|
||||
description:
|
||||
- Server name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testserver
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: db1
|
||||
charset:
|
||||
description:
|
||||
- The charset of the database.
|
||||
returned: always
|
||||
type: str
|
||||
sample: UTF8
|
||||
collation:
|
||||
description:
|
||||
- The collation of the database.
|
||||
returned: always
|
||||
type: str
|
||||
sample: English_United States.1252
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if (self.resource_group is not None and
|
||||
self.server_name is not None and
|
||||
self.name is not None):
|
||||
self.results['databases'] = self.get()
|
||||
elif (self.resource_group is not None and
|
||||
self.server_name is not None):
|
||||
self.results['databases'] = self.list_by_server()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
database_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Databases.')
|
||||
|
||||
if response is not None:
|
||||
results.append(self.format_item(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_server(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_item(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_item(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'server_name': self.server_name,
|
||||
'name': d['name'],
|
||||
'charset': d['charset'],
|
||||
'collation': d['collation']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMMariaDbDatabaseInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,277 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mariadbfirewallrule
|
||||
version_added: "2.8"
|
||||
short_description: Manage MariaDB firewall rule instance
|
||||
description:
|
||||
- Create, update and delete instance of MariaDB firewall rule.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the MariaDB firewall rule.
|
||||
required: True
|
||||
start_ip_address:
|
||||
description:
|
||||
- The start IP address of the MariaDB firewall rule. Must be IPv4 format.
|
||||
end_ip_address:
|
||||
description:
|
||||
- The end IP address of the MariaDB firewall rule. Must be IPv4 format.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Matti Ranta (@techknowlogick)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
- name: Create (or update) MariaDB firewall rule
  azure_rm_mariadbfirewallrule:
    resource_group: myResourceGroup
    server_name: testserver
    name: rule1
    start_ip_address: 10.0.0.17
    end_ip_address: 10.0.0.20
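
# The following example is an illustrative sketch, not part of the original module documentation.
# It removes the rule created above using state=absent, as described in the options; the
# resource group, server, and rule names are hypothetical.
- name: Remove MariaDB firewall rule
  azure_rm_mariadbfirewallrule:
    resource_group: myResourceGroup
    server_name: testserver
    name: rule1
    state: absent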
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
|
||||
wallRules/rule1"
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMMariaDbFirewallRule(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM MariaDB firewall rule resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
start_ip_address=dict(
|
||||
type='str'
|
||||
),
|
||||
end_ip_address=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
self.start_ip_address = None
|
||||
self.end_ip_address = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
old_response = self.get_firewallrule()
|
||||
|
||||
if not old_response:
|
||||
self.log("MariaDB firewall rule instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("MariaDB firewall rule instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated")
|
||||
if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
|
||||
self.to_do = Actions.Update
|
||||
if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the MariaDB firewall rule instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_firewallrule()
|
||||
|
||||
if not old_response:
|
||||
self.results['changed'] = True
|
||||
else:
|
||||
self.results['changed'] = old_response.__ne__(response)
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("MariaDB firewall rule instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_firewallrule()
|
||||
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
|
||||
# for some time after deletion -- this should be really fixed in Azure
|
||||
while self.get_firewallrule():
|
||||
time.sleep(20)
|
||||
else:
|
||||
self.log("MariaDB firewall rule instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_firewallrule(self):
|
||||
'''
|
||||
Creates or updates MariaDB firewall rule with the specified configuration.
|
||||
|
||||
:return: deserialized MariaDB firewall rule instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mariadb_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
firewall_rule_name=self.name,
|
||||
start_ip_address=self.start_ip_address,
|
||||
end_ip_address=self.end_ip_address)
|
||||
if isinstance(response, LROPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the MariaDB firewall rule instance.')
|
||||
self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_firewallrule(self):
|
||||
'''
|
||||
Deletes specified MariaDB firewall rule instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mariadb_client.firewall_rules.delete(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
firewall_rule_name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the MariaDB firewall rule instance.')
|
||||
self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_firewallrule(self):
|
||||
'''
|
||||
Gets the properties of the specified MariaDB firewall rule.
|
||||
|
||||
:return: deserialized MariaDB firewall rule instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
firewall_rule_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("MariaDB firewall rule instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the MariaDB firewall rule instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMMariaDbFirewallRule()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,207 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mariadbfirewallrule_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure MariaDB Firewall Rule facts
|
||||
description:
|
||||
- Get facts of Azure MariaDB Firewall Rule.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group.
|
||||
required: True
|
||||
type: str
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the server firewall rule.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Matti Ranta (@techknowlogick)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of MariaDB Firewall Rule
|
||||
azure_rm_mariadbfirewallrule_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: server_name
|
||||
name: firewall_rule_name
|
||||
|
||||
- name: List instances of MariaDB Firewall Rule
|
||||
azure_rm_mariadbfirewallrule_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: server_name
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
rules:
|
||||
description:
|
||||
- A list of dictionaries containing facts for MariaDB Firewall Rule.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
|
||||
wallRules/rule1"
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testserver
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: rule1
|
||||
start_ip_address:
|
||||
description:
|
||||
- The start IP address of the MariaDB firewall rule.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 10.0.0.16
|
||||
end_ip_address:
|
||||
description:
|
||||
- The end IP address of the MariaDB firewall rule.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 10.0.0.18
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMMariaDbFirewallRuleInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
super(AzureRMMariaDbFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_mariadbfirewallrule_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_mariadbfirewallrule_facts' module has been renamed to 'azure_rm_mariadbfirewallrule_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if (self.name is not None):
|
||||
self.results['rules'] = self.get()
|
||||
else:
|
||||
self.results['rules'] = self.list_by_server()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
firewall_rule_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for FirewallRules.')
|
||||
|
||||
if response is not None:
|
||||
results.append(self.format_item(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_server(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for FirewallRules.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_item(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_item(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'id': d['id'],
|
||||
'server_name': self.server_name,
|
||||
'name': d['name'],
|
||||
'start_ip_address': d['start_ip_address'],
|
||||
'end_ip_address': d['end_ip_address']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMMariaDbFirewallRuleInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,388 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mariadbserver
|
||||
version_added: "2.8"
|
||||
short_description: Manage MariaDB Server instance
|
||||
description:
|
||||
- Create, update and delete instance of MariaDB Server.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
sku:
|
||||
description:
|
||||
- The SKU (pricing tier) of the server.
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- The name of the SKU, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8).
|
||||
tier:
|
||||
description:
|
||||
- The tier of the particular SKU, for example C(Basic).
|
||||
choices:
|
||||
- basic
|
||||
- standard
|
||||
capacity:
|
||||
description:
|
||||
- The scale up/out capacity, representing server's compute units.
|
||||
type: int
|
||||
size:
|
||||
description:
|
||||
- The size code, to be interpreted by resource as appropriate.
|
||||
location:
|
||||
description:
|
||||
- Resource location. If not set, location from the resource group will be used as default.
|
||||
storage_mb:
|
||||
description:
|
||||
- The maximum storage allowed for a server.
|
||||
type: int
|
||||
version:
|
||||
description:
|
||||
- Server version.
|
||||
choices:
|
||||
- 10.2
|
||||
enforce_ssl:
|
||||
description:
|
||||
- Enable SSL enforcement.
|
||||
type: bool
|
||||
default: False
|
||||
admin_username:
|
||||
description:
|
||||
- The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
|
||||
admin_password:
|
||||
description:
|
||||
- The password of the administrator login.
|
||||
create_mode:
|
||||
description:
|
||||
            - Create mode of MariaDB Server.
|
||||
default: Default
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the MariaDB Server. Use C(present) to create or update a server and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Matti Ranta (@techknowlogick)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
- name: Create (or update) MariaDB Server
  azure_rm_mariadbserver:
    resource_group: myResourceGroup
    name: testserver
    sku:
      name: B_Gen5_1
      tier: Basic
    location: eastus
    storage_mb: 1024
    enforce_ssl: True
    version: 10.2
    admin_username: cloudsa
    admin_password: password
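
# The following example is an illustrative sketch, not part of the original module documentation.
# It deletes the server created above using state=absent, as described in the options; the
# resource group and server names are hypothetical.
- name: Delete MariaDB Server
  azure_rm_mariadbserver:
    resource_group: myResourceGroup
    name: testserver
    state: absent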
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/mariadbsrv1b6dd89593
|
||||
version:
|
||||
description:
|
||||
- Server version. Possible values include C(10.2).
|
||||
returned: always
|
||||
type: str
|
||||
sample: 10.2
|
||||
state:
|
||||
description:
|
||||
- A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled).
|
||||
returned: always
|
||||
type: str
|
||||
sample: Ready
|
||||
fully_qualified_domain_name:
|
||||
description:
|
||||
- The fully qualified domain name of a server.
|
||||
returned: always
|
||||
type: str
|
||||
sample: mariadbsrv1b6dd89593.mariadb.database.azure.com
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMMariaDbServers(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM MariaDB Server resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
sku=dict(
|
||||
type='dict'
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
storage_mb=dict(
|
||||
type='int'
|
||||
),
|
||||
version=dict(
|
||||
type='str',
|
||||
choices=['10.2']
|
||||
),
|
||||
enforce_ssl=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
create_mode=dict(
|
||||
type='str',
|
||||
default='Default'
|
||||
),
|
||||
admin_username=dict(
|
||||
type='str'
|
||||
),
|
||||
admin_password=dict(
|
||||
type='str',
|
||||
no_log=True
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.parameters = dict()
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMMariaDbServers, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
if key == "sku":
|
||||
ev = kwargs[key]
|
||||
if 'tier' in ev:
|
||||
if ev['tier'] == 'basic':
|
||||
ev['tier'] = 'Basic'
|
||||
elif ev['tier'] == 'standard':
|
||||
ev['tier'] = 'Standard'
|
||||
self.parameters["sku"] = ev
|
||||
elif key == "location":
|
||||
self.parameters["location"] = kwargs[key]
|
||||
elif key == "storage_mb":
|
||||
self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key]
|
||||
elif key == "version":
|
||||
self.parameters.setdefault("properties", {})["version"] = kwargs[key]
|
||||
elif key == "enforce_ssl":
|
||||
self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled'
|
||||
elif key == "create_mode":
|
||||
self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key]
|
||||
elif key == "admin_username":
|
||||
self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key]
|
||||
elif key == "admin_password":
|
||||
self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key]
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
if "location" not in self.parameters:
|
||||
self.parameters["location"] = resource_group.location
|
||||
|
||||
old_response = self.get_mariadbserver()
|
||||
|
||||
if not old_response:
|
||||
self.log("MariaDB Server instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("MariaDB Server instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.log("Need to check if MariaDB Server instance has to be deleted or may be updated")
|
||||
update_tags, newtags = self.update_tags(old_response.get('tags', {}))
|
||||
if update_tags:
|
||||
self.tags = newtags
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the MariaDB Server instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_mariadbserver()
|
||||
|
||||
if not old_response:
|
||||
self.results['changed'] = True
|
||||
else:
|
||||
                self.results['changed'] = (old_response != response)
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("MariaDB Server instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_mariadbserver()
|
||||
            # make sure the instance is actually deleted; for some Azure resources the
            # instance lingers for a while after deletion, so poll until it is gone
|
||||
while self.get_mariadbserver():
|
||||
time.sleep(20)
|
||||
else:
|
||||
self.log("MariaDB Server instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
self.results["version"] = response["version"]
|
||||
self.results["state"] = response["user_visible_state"]
|
||||
self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_mariadbserver(self):
|
||||
'''
|
||||
Creates or updates MariaDB Server with the specified configuration.
|
||||
|
||||
:return: deserialized MariaDB Server instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the MariaDB Server instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
self.parameters['tags'] = self.tags
|
||||
if self.to_do == Actions.Create:
|
||||
response = self.mariadb_client.servers.create(resource_group_name=self.resource_group,
|
||||
server_name=self.name,
|
||||
parameters=self.parameters)
|
||||
else:
|
||||
                # the update API expects a flat parameter structure, so merge the
                # nested 'properties' into the top level before calling update
|
||||
self.parameters.update(self.parameters.pop("properties", {}))
|
||||
response = self.mariadb_client.servers.update(resource_group_name=self.resource_group,
|
||||
server_name=self.name,
|
||||
parameters=self.parameters)
|
||||
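            # create/update is a long-running operation; when the SDK hands back an
            # LROPoller, wait for it to complete and use the final server object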
if isinstance(response, LROPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
            self.log('Error attempting to create or update the MariaDB Server instance.')
            self.fail("Error creating or updating the MariaDB Server instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_mariadbserver(self):
|
||||
'''
|
||||
Deletes specified MariaDB Server instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the MariaDB Server instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mariadb_client.servers.delete(resource_group_name=self.resource_group,
|
||||
server_name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the MariaDB Server instance.')
|
||||
self.fail("Error deleting the MariaDB Server instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_mariadbserver(self):
|
||||
'''
|
||||
Gets the properties of the specified MariaDB Server.
|
||||
|
||||
:return: deserialized MariaDB Server instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the MariaDB Server instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
|
||||
server_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("MariaDB Server instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the MariaDB Server instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMMariaDbServers()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,264 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mariadbserver_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure MariaDB Server facts
|
||||
description:
|
||||
- Get facts of MariaDB Server.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the server.
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
|
||||
type: list
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
- Matti Ranta (@techknowlogick)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of MariaDB Server
|
||||
azure_rm_mariadbserver_info:
|
||||
resource_group: myResourceGroup
|
||||
name: server_name
|
||||
|
||||
- name: List instances of MariaDB Server
|
||||
azure_rm_mariadbserver_info:
|
||||
resource_group: myResourceGroup
|
||||
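  # A minimal illustrative task (assumed tag value) showing the documented 'tags'
  # option, which limits the listing to servers carrying the given tag.
  - name: List instances of MariaDB Server filtered by tag
    azure_rm_mariadbserver_info:
      resource_group: myResourceGroup
      tags:
        - environment:dev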
'''
|
||||
|
||||
RETURN = '''
|
||||
servers:
|
||||
description:
|
||||
- A list of dictionaries containing facts for MariaDB servers.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myabdud1223
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myResourceGroup
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myabdud1223
|
||||
location:
|
||||
description:
|
||||
- The location the resource resides in.
|
||||
returned: always
|
||||
type: str
|
||||
sample: eastus
|
||||
sku:
|
||||
description:
|
||||
- The SKU of the server.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
name:
|
||||
description:
|
||||
- The name of the SKU.
|
||||
returned: always
|
||||
type: str
|
||||
sample: GP_Gen4_2
|
||||
tier:
|
||||
description:
|
||||
- The tier of the particular SKU.
|
||||
returned: always
|
||||
type: str
|
||||
sample: GeneralPurpose
|
||||
capacity:
|
||||
description:
|
||||
- The scale capacity.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 2
|
||||
storage_mb:
|
||||
description:
|
||||
- The maximum storage allowed for a server.
|
||||
returned: always
|
||||
type: int
|
||||
sample: 128000
|
||||
enforce_ssl:
|
||||
description:
|
||||
- Enable SSL enforcement.
|
||||
returned: always
|
||||
type: bool
|
||||
sample: False
|
||||
admin_username:
|
||||
description:
|
||||
- The administrator's login name of a server.
|
||||
returned: always
|
||||
type: str
|
||||
sample: serveradmin
|
||||
version:
|
||||
description:
|
||||
- Server version.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "9.6"
|
||||
user_visible_state:
|
||||
description:
|
||||
                - The state of the server that is visible to the user.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Ready
|
||||
fully_qualified_domain_name:
|
||||
description:
|
||||
- The fully qualified domain name of a server.
|
||||
returned: always
|
||||
type: str
|
||||
sample: myabdud1223.mys.database.azure.com
|
||||
tags:
|
||||
description:
|
||||
- Tags assigned to the resource. Dictionary of string:string pairs.
|
||||
type: dict
|
||||
sample: { tag1: abc }
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMMariaDbServerInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
),
|
||||
tags=dict(
|
||||
type='list'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.resource_group = None
|
||||
self.name = None
|
||||
self.tags = None
|
||||
super(AzureRMMariaDbServerInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_mariadbserver_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_mariadbserver_facts' module has been renamed to 'azure_rm_mariadbserver_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if (self.resource_group is not None and
|
||||
self.name is not None):
|
||||
self.results['servers'] = self.get()
|
||||
elif (self.resource_group is not None):
|
||||
self.results['servers'] = self.list_by_resource_group()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
|
||||
server_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for MariaDB Server.')
|
||||
|
||||
if response and self.has_tags(response.tags, self.tags):
|
||||
results.append(self.format_item(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_resource_group(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mariadb_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for MariaDB Servers.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
if self.has_tags(item.tags, self.tags):
|
||||
results.append(self.format_item(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_item(self, item):
|
||||
d = item.as_dict()
|
||||
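        # Flatten the SDK server model into the documented return structure;
        # enforce_ssl is derived from the service-side 'ssl_enforcement' enum.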
d = {
|
||||
'id': d['id'],
|
||||
'resource_group': self.resource_group,
|
||||
'name': d['name'],
|
||||
'sku': d['sku'],
|
||||
'location': d['location'],
|
||||
'storage_mb': d['storage_profile']['storage_mb'],
|
||||
'version': d['version'],
|
||||
'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'),
|
||||
'admin_username': d['administrator_login'],
|
||||
'user_visible_state': d['user_visible_state'],
|
||||
'fully_qualified_domain_name': d['fully_qualified_domain_name'],
|
||||
'tags': d.get('tags')
|
||||
}
|
||||
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMMariaDbServerInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,392 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_monitorlogprofile
|
||||
version_added: "2.9"
|
||||
short_description: Manage Azure Monitor log profile
|
||||
description:
|
||||
- Create, update and delete instance of Azure Monitor log profile.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Unique name of the log profile to create or update.
|
||||
required: True
|
||||
type: str
|
||||
location:
|
||||
description:
|
||||
- Resource location.
|
||||
type: str
|
||||
locations:
|
||||
description:
|
||||
- List of regions for which Activity Log events should be stored.
|
||||
type: list
|
||||
categories:
|
||||
description:
|
||||
            - List of categories of logs. These categories are created as is convenient to the user. Some values are C(Write), C(Delete) and/or C(Action).
|
||||
type: list
|
||||
retention_policy:
|
||||
description:
|
||||
- Retention policy for events in the log.
|
||||
type: dict
|
||||
suboptions:
|
||||
enabled:
|
||||
description:
|
||||
- Whether the retention policy is enabled.
|
||||
type: bool
|
||||
days:
|
||||
description:
|
||||
- The number of days for the retention. A value of 0 will retain the events indefinitely.
|
||||
type: int
|
||||
service_bus_rule_id:
|
||||
description:
|
||||
- The service bus rule ID of the service bus namespace in which you would like to have Event Hubs created for streaming in the Activity Log.
|
||||
- Format like {service_bus_resource_id}/authorizationrules{key_name}.
|
||||
type: str
|
||||
storage_account:
|
||||
description:
|
||||
- The storage account to which send the Activity Log.
|
||||
- It could be a resource ID.
|
||||
            - It could be a dict containing I(resource_group) and I(name).
|
||||
type: raw
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the log profile.
|
||||
- Use C(present) to create or update a log profile and C(absent) to delete it.
|
||||
default: present
|
||||
type: str
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
- azure_tags
|
||||
|
||||
author:
|
||||
    - Yunge Zhu (@yungezz)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a log profile
|
||||
azure_rm_monitorlogprofile:
|
||||
name: myProfile
|
||||
location: eastus
|
||||
locations:
|
||||
- eastus
|
||||
- westus
|
||||
categories:
|
||||
- Write
|
||||
- Action
|
||||
retention_policy:
|
||||
enabled: False
|
||||
days: 1
|
||||
storage_account:
|
||||
resource_group: myResourceGroup
|
||||
name: myStorageAccount
|
||||
register: output
|
||||
|
||||
- name: Delete a log profile
|
||||
azure_rm_monitorlogprofile:
|
||||
name: myProfile
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- ID of the log profile.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/microsoft.insights/logprofiles/myProfile
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from msrestazure.tools import is_valid_resource_id
|
||||
from msrest.serialization import Model
|
||||
from azure.mgmt.monitor.models import (RetentionPolicy, LogProfileResource, ErrorResponseException)
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
retention_policy_spec = dict(
|
||||
enabled=dict(type='bool'),
|
||||
days=dict(type='int')
|
||||
)
|
||||
|
||||
|
||||
def logprofile_to_dict(profile):
|
||||
return dict(
|
||||
id=profile.id,
|
||||
name=profile.name,
|
||||
location=profile.location,
|
||||
locations=profile.locations,
|
||||
categories=profile.categories,
|
||||
storage_account=profile.storage_account_id,
|
||||
service_bus_rule_id=profile.service_bus_rule_id,
|
||||
retention_policy=dict(
|
||||
enabled=profile.retention_policy.enabled,
|
||||
days=profile.retention_policy.days
|
||||
),
|
||||
tags=profile.tags if profile.tags else None
|
||||
)
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, CreateOrUpdate, Delete = range(3)
|
||||
|
||||
|
||||
class AzureRMMonitorLogprofile(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM Monitor log profile"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
location=dict(
|
||||
type='str'
|
||||
),
|
||||
locations=dict(
|
||||
type='list',
|
||||
elements='str'
|
||||
),
|
||||
categories=dict(
|
||||
type='list',
|
||||
elements='str'
|
||||
),
|
||||
retention_policy=dict(
|
||||
type='dict',
|
||||
options=retention_policy_spec
|
||||
),
|
||||
service_bus_rule_id=dict(
|
||||
type='str'
|
||||
),
|
||||
storage_account=dict(
|
||||
type='raw'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self._client = None
|
||||
|
||||
self.name = None
|
||||
self.location = None
|
||||
|
||||
self.locations = None
|
||||
self.categories = None
|
||||
        self.retention_policy = None
|
||||
self.service_bus_rule_id = None
|
||||
self.storage_account = None
|
||||
|
||||
self.tags = None
|
||||
|
||||
self.results = dict(
|
||||
changed=False,
|
||||
id=None
|
||||
)
|
||||
self.state = None
|
||||
|
||||
super(AzureRMMonitorLogprofile, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=True)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()) + ['tags']:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
to_be_updated = False
|
||||
|
||||
# get storage account id
|
||||
if self.storage_account:
|
||||
if isinstance(self.storage_account, dict):
|
||||
self.storage_account = format_resource_id(val=self.storage_account['name'],
|
||||
subscription_id=self.storage_account.get('subscription') or self.subscription_id,
|
||||
namespace='Microsoft.Storage',
|
||||
types='storageAccounts',
|
||||
resource_group=self.storage_account.get('resource_group'))
|
||||
elif not is_valid_resource_id(self.storage_account):
|
||||
                self.fail("storage_account must be either a resource id or a dict containing resource_group and name")
|
||||
|
||||
# get existing log profile
|
||||
old_response = self.get_logprofile()
|
||||
|
||||
if old_response:
|
||||
self.results['id'] = old_response['id']
|
||||
|
||||
if self.state == 'present':
|
||||
            # if the profile does not exist, create a new one
|
||||
if not old_response:
|
||||
self.log("Log profile instance doesn't exist")
|
||||
|
||||
to_be_updated = True
|
||||
self.to_do = Actions.CreateOrUpdate
|
||||
|
||||
else:
|
||||
# log profile exists already, do update
|
||||
self.log("Log profile instance already exists")
|
||||
|
||||
update_tags, self.tags = self.update_tags(old_response.get('tags', None))
|
||||
|
||||
if update_tags:
|
||||
to_be_updated = True
|
||||
self.to_do = Actions.CreateOrUpdate
|
||||
|
||||
# check if update
|
||||
if self.check_update(old_response):
|
||||
to_be_updated = True
|
||||
self.to_do = Actions.CreateOrUpdate
|
||||
|
||||
elif self.state == 'absent':
|
||||
if old_response:
|
||||
self.log("Delete log profile instance")
|
||||
self.results['id'] = old_response['id']
|
||||
to_be_updated = True
|
||||
self.to_do = Actions.Delete
|
||||
else:
|
||||
self.results['changed'] = False
|
||||
                self.log("Log profile {0} does not exist.".format(self.name))
|
||||
|
||||
if to_be_updated:
|
||||
self.log('Need to Create/Update log profile')
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
if self.to_do == Actions.CreateOrUpdate:
|
||||
response = self.create_or_update_logprofile()
|
||||
self.results['id'] = response['id']
|
||||
|
||||
if self.to_do == Actions.Delete:
|
||||
self.delete_logprofile()
|
||||
self.log('Log profile instance deleted')
|
||||
|
||||
return self.results
|
||||
|
||||
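    # Compare the requested locations, retention policy, storage account and service
    # bus rule id with the existing profile; any difference triggers an update.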
def check_update(self, existing):
|
||||
if self.locations is not None and existing['locations'] != self.locations:
|
||||
self.log("locations diff: origin {0} / update {1}".format(existing['locations'], self.locations))
|
||||
return True
|
||||
if self.retention_policy is not None:
|
||||
if existing['retention_policy']['enabled'] != self.retention_policy['enabled']:
|
||||
                self.log("retention_policy diff: origin {0} / update {1}".format(str(existing['retention_policy']['enabled']), str(self.retention_policy['enabled'])))
|
||||
return True
|
||||
if existing['retention_policy']['days'] != self.retention_policy['days']:
|
||||
self.log("retention_policy diff: origin {0} / update {1}".format(existing['retention_policy']['days'], str(self.retention_policy['days'])))
|
||||
return True
|
||||
if self.storage_account is not None and existing['storage_account'] != self.storage_account:
|
||||
self.log("storage_account diff: origin {0} / update {1}".format(existing['storage_account'], self.storage_account))
|
||||
return True
|
||||
if self.service_bus_rule_id is not None and existing['service_bus_rule_id'] != self.service_bus_rule_id:
|
||||
self.log("service_bus_rule_id diff: origin {0} / update {1}".format(existing['service_bus_rule_id'], self.service_bus_rule_id))
|
||||
return True
|
||||
return False
|
||||
|
||||
def create_or_update_logprofile(self):
|
||||
'''
|
||||
        Creates or updates the log profile.
|
||||
|
||||
:return: deserialized log profile state dictionary
|
||||
'''
|
||||
self.log(
|
||||
"Creating log profile instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
params = LogProfileResource(
|
||||
location=self.location,
|
||||
locations=self.locations,
|
||||
categories=self.categories,
|
||||
retention_policy=RetentionPolicy(days=self.retention_policy['days'],
|
||||
enabled=self.retention_policy['enabled']) if self.retention_policy else None,
|
||||
storage_account_id=self.storage_account if self.storage_account else None,
|
||||
service_bus_rule_id=self.service_bus_rule_id if self.service_bus_rule_id else None,
|
||||
tags=self.tags
|
||||
)
|
||||
|
||||
response = self.monitor_client.log_profiles.create_or_update(log_profile_name=self.name,
|
||||
parameters=params)
|
||||
if isinstance(response, AzureOperationPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create/update log profile.')
|
||||
self.fail("Error creating/updating log profile: {0}".format(str(exc)))
|
||||
return logprofile_to_dict(response)
|
||||
|
||||
def delete_logprofile(self):
|
||||
'''
|
||||
Deletes specified log profile.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the log profile instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.monitor_client.log_profiles.delete(log_profile_name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the log profile.')
|
||||
self.fail(
|
||||
"Error deleting the log profile: {0}".format(str(e)))
|
||||
return True
|
||||
|
||||
def get_logprofile(self):
|
||||
'''
|
||||
Gets the properties of the specified log profile.
|
||||
|
||||
:return: log profile state dictionary
|
||||
'''
|
||||
self.log("Checking if the log profile {0} is present".format(self.name))
|
||||
|
||||
response = None
|
||||
|
||||
try:
|
||||
response = self.monitor_client.log_profiles.get(log_profile_name=self.name)
|
||||
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("log profile : {0} found".format(response.name))
|
||||
return logprofile_to_dict(response)
|
||||
|
||||
except ErrorResponseException as ex:
|
||||
self.log("Didn't find log profile {0}".format(self.name))
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMMonitorLogprofile()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,240 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mysqlconfiguration
|
||||
version_added: "2.8"
|
||||
short_description: Manage Configuration instance
|
||||
description:
|
||||
- Create, update and delete instance of Configuration.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource.
|
||||
required: True
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the server configuration.
|
||||
required: True
|
||||
value:
|
||||
description:
|
||||
- Value of the configuration.
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the MySQL configuration. Use C(present) to update setting, or C(absent) to reset to default value.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
  - name: Update MySQL Server setting
|
||||
azure_rm_mysqlconfiguration:
|
||||
resource_group: myResourceGroup
|
||||
server_name: myServer
|
||||
name: event_scheduler
|
||||
value: "ON"
|
||||
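  # A minimal illustrative task: per the 'state' option above, C(absent) resets the
  # setting back to its system default value.
  - name: Reset MySQL Server setting to its default value
    azure_rm_mysqlconfiguration:
      resource_group: myResourceGroup
      server_name: myServer
      name: event_scheduler
      state: absent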
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/myServer/confi
|
||||
gurations/event_scheduler"
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from azure.mgmt.rdbms.mysql import MySQLManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMMySqlConfiguration(AzureRMModuleBase):
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
value=dict(
|
||||
type='str'
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
self.value = None
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMMySqlConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
old_response = self.get_configuration()
|
||||
|
||||
if not old_response:
|
||||
self.log("Configuration instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("Configuration instance already exists")
|
||||
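            # only settings overridden by the user are reset on 'absent';
            # values still at 'system-default' are left untouched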
if self.state == 'absent' and old_response['source'] == 'user-override':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.log("Need to check if Configuration instance has to be deleted or may be updated")
|
||||
if self.value != old_response.get('value'):
|
||||
self.to_do = Actions.Update
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the Configuration instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_configuration()
|
||||
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("Configuration instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_configuration()
|
||||
else:
|
||||
self.log("Configuration instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_configuration(self):
|
||||
self.log("Creating / Updating the Configuration instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mysql_client.configurations.create_or_update(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
configuration_name=self.name,
|
||||
value=self.value,
|
||||
source='user-override')
|
||||
if isinstance(response, LROPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the Configuration instance.')
|
||||
self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_configuration(self):
|
||||
self.log("Deleting the Configuration instance {0}".format(self.name))
|
||||
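        # there is no dedicated delete call for a configuration; resetting it is done
        # by writing the setting back with source='system-default'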
try:
|
||||
response = self.mysql_client.configurations.create_or_update(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
configuration_name=self.name,
|
||||
source='system-default')
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the Configuration instance.')
|
||||
self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_configuration(self):
|
||||
self.log("Checking if the Configuration instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mysql_client.configurations.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
configuration_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("Configuration instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the Configuration instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMMySqlConfiguration()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,214 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mysqlconfiguration_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure MySQL Configuration facts
|
||||
description:
|
||||
- Get facts of Azure MySQL Configuration.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
type: str
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Setting name.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get specific setting of MySQL Server
|
||||
azure_rm_mysqlconfiguration_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: testmysqlserver
|
||||
name: deadlock_timeout
|
||||
|
||||
- name: Get all settings of MySQL Server
|
||||
azure_rm_mysqlconfiguration_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: server_name
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
settings:
|
||||
description:
|
||||
- A list of dictionaries containing MySQL Server settings.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Setting resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testmysqlser
|
||||
ver/configurations/deadlock_timeout"
|
||||
name:
|
||||
description:
|
||||
- Setting name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: deadlock_timeout
|
||||
value:
|
||||
description:
|
||||
- Setting value.
|
||||
returned: always
|
||||
type: raw
|
||||
sample: 1000
|
||||
description:
|
||||
description:
|
||||
- Description of the configuration.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Deadlock timeout.
|
||||
source:
|
||||
description:
|
||||
- Source of the configuration.
|
||||
returned: always
|
||||
type: str
|
||||
sample: system-default
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrestazure.azure_operation import AzureOperationPoller
|
||||
from azure.mgmt.rdbms.mysql import MySQLManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMMySqlConfigurationInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
super(AzureRMMySqlConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_mysqlconfiguration_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_mysqlconfiguration_facts' module has been renamed to 'azure_rm_mysqlconfiguration_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
if self.name is not None:
|
||||
self.results['settings'] = self.get()
|
||||
else:
|
||||
self.results['settings'] = self.list_by_server()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
'''
|
||||
Gets facts of the specified MySQL Configuration.
|
||||
|
||||
        :return: deserialized MySQL Configuration instance state dictionary
|
||||
'''
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
configuration_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Configurations.')
|
||||
|
||||
if response is not None:
|
||||
results.append(self.format_item(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_server(self):
|
||||
'''
|
||||
        Gets facts of all configurations of the specified MySQL Server.
|
||||
|
||||
        :return: deserialized MySQL Configuration instance state dictionary
|
||||
'''
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Configurations.')
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_item(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_item(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'server_name': self.server_name,
|
||||
'id': d['id'],
|
||||
'name': d['name'],
|
||||
'value': d['value'],
|
||||
'description': d['description'],
|
||||
'source': d['source']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMMySqlConfigurationInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,302 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mysqldatabase
|
||||
version_added: "2.5"
|
||||
short_description: Manage MySQL Database instance
|
||||
description:
|
||||
- Create, update and delete instance of MySQL Database.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
name:
|
||||
description:
|
||||
- The name of the database.
|
||||
required: True
|
||||
charset:
|
||||
description:
|
||||
- The charset of the database. Check MySQL documentation for possible values.
|
||||
- This is only set on creation, use I(force_update) to recreate a database if the values don't match.
|
||||
collation:
|
||||
description:
|
||||
- The collation of the database. Check MySQL documentation for possible values.
|
||||
- This is only set on creation, use I(force_update) to recreate a database if the values don't match.
|
||||
force_update:
|
||||
description:
|
||||
- When set to C(true), will delete and recreate the existing MySQL database if any of the properties don't match what is set.
|
||||
- When set to C(false), no change will occur to the database even if any of the properties do not match.
|
||||
type: bool
|
||||
default: 'no'
|
||||
state:
|
||||
description:
|
||||
- Assert the state of the MySQL Database. Use C(present) to create or update a database and C(absent) to delete it.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create (or update) MySQL Database
|
||||
azure_rm_mysqldatabase:
|
||||
resource_group: myResourceGroup
|
||||
server_name: testserver
|
||||
name: db1
|
||||
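  # A minimal illustrative task (charset/collation values are placeholders) showing the
  # documented options; force_update recreates the database when the values differ.
  - name: Create (or recreate) MySQL Database with explicit charset and collation
    azure_rm_mysqldatabase:
      resource_group: myResourceGroup
      server_name: testserver
      name: db1
      charset: utf8
      collation: utf8_general_ci
      force_update: yes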
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testserver/databases/db1
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: db1
|
||||
'''
|
||||
|
||||
import time
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from azure.mgmt.rdbms.mysql import MySQLManagementClient
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrest.polling import LROPoller
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class Actions:
|
||||
NoAction, Create, Update, Delete = range(4)
|
||||
|
||||
|
||||
class AzureRMMySqlDatabase(AzureRMModuleBase):
|
||||
"""Configuration class for an Azure RM MySQL Database resource"""
|
||||
|
||||
def __init__(self):
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
charset=dict(
|
||||
type='str'
|
||||
),
|
||||
collation=dict(
|
||||
type='str'
|
||||
),
|
||||
force_update=dict(
|
||||
type='bool',
|
||||
default=False
|
||||
),
|
||||
state=dict(
|
||||
type='str',
|
||||
default='present',
|
||||
choices=['present', 'absent']
|
||||
)
|
||||
)
|
||||
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
self.force_update = None
|
||||
self.parameters = dict()
|
||||
|
||||
self.results = dict(changed=False)
|
||||
self.mgmt_client = None
|
||||
self.state = None
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
super(AzureRMMySqlDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
|
||||
supports_check_mode=True,
|
||||
supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
"""Main module execution method"""
|
||||
|
||||
for key in list(self.module_arg_spec.keys()):
|
||||
if hasattr(self, key):
|
||||
setattr(self, key, kwargs[key])
|
||||
elif kwargs[key] is not None:
|
||||
if key == "charset":
|
||||
self.parameters["charset"] = kwargs[key]
|
||||
elif key == "collation":
|
||||
self.parameters["collation"] = kwargs[key]
|
||||
|
||||
old_response = None
|
||||
response = None
|
||||
|
||||
self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
|
||||
base_url=self._cloud_environment.endpoints.resource_manager)
|
||||
|
||||
resource_group = self.get_resource_group(self.resource_group)
|
||||
|
||||
old_response = self.get_mysqldatabase()
|
||||
|
||||
if not old_response:
|
||||
self.log("MySQL Database instance doesn't exist")
|
||||
if self.state == 'absent':
|
||||
self.log("Old instance didn't exist")
|
||||
else:
|
||||
self.to_do = Actions.Create
|
||||
else:
|
||||
self.log("MySQL Database instance already exists")
|
||||
if self.state == 'absent':
|
||||
self.to_do = Actions.Delete
|
||||
elif self.state == 'present':
|
||||
self.log("Need to check if MySQL Database instance has to be deleted or may be updated")
|
||||
if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
|
||||
self.to_do = Actions.Update
|
||||
if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
|
||||
self.to_do = Actions.Update
|
||||
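                # charset and collation can only be set at creation time, so a change
                # requires dropping and recreating the database; this is only done when
                # force_update is set, otherwise the module fails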
if self.to_do == Actions.Update:
|
||||
if self.force_update:
|
||||
if not self.check_mode:
|
||||
self.delete_mysqldatabase()
|
||||
else:
|
||||
                        self.fail("Database properties cannot be updated without setting the 'force_update' option")
|
||||
self.to_do = Actions.NoAction
|
||||
|
||||
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
|
||||
self.log("Need to Create / Update the MySQL Database instance")
|
||||
|
||||
if self.check_mode:
|
||||
self.results['changed'] = True
|
||||
return self.results
|
||||
|
||||
response = self.create_update_mysqldatabase()
|
||||
self.results['changed'] = True
|
||||
self.log("Creation / Update done")
|
||||
elif self.to_do == Actions.Delete:
|
||||
self.log("MySQL Database instance deleted")
|
||||
self.results['changed'] = True
|
||||
|
||||
if self.check_mode:
|
||||
return self.results
|
||||
|
||||
self.delete_mysqldatabase()
|
||||
            # make sure the instance is actually deleted; for some Azure resources the
            # instance lingers for a while after deletion, so poll until it is gone
|
||||
while self.get_mysqldatabase():
|
||||
time.sleep(20)
|
||||
else:
|
||||
self.log("MySQL Database instance unchanged")
|
||||
self.results['changed'] = False
|
||||
response = old_response
|
||||
|
||||
if response:
|
||||
self.results["id"] = response["id"]
|
||||
self.results["name"] = response["name"]
|
||||
|
||||
return self.results
|
||||
|
||||
def create_update_mysqldatabase(self):
|
||||
'''
|
||||
Creates or updates MySQL Database with the specified configuration.
|
||||
|
||||
:return: deserialized MySQL Database instance state dictionary
|
||||
'''
|
||||
self.log("Creating / Updating the MySQL Database instance {0}".format(self.name))
|
||||
|
||||
try:
|
||||
response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
database_name=self.name,
|
||||
parameters=self.parameters)
|
||||
if isinstance(response, LROPoller):
|
||||
response = self.get_poller_result(response)
|
||||
|
||||
except CloudError as exc:
|
||||
self.log('Error attempting to create the MySQL Database instance.')
|
||||
self.fail("Error creating the MySQL Database instance: {0}".format(str(exc)))
|
||||
return response.as_dict()
|
||||
|
||||
def delete_mysqldatabase(self):
|
||||
'''
|
||||
Deletes specified MySQL Database instance in the specified subscription and resource group.
|
||||
|
||||
:return: True
|
||||
'''
|
||||
self.log("Deleting the MySQL Database instance {0}".format(self.name))
|
||||
try:
|
||||
response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
database_name=self.name)
|
||||
except CloudError as e:
|
||||
self.log('Error attempting to delete the MySQL Database instance.')
|
||||
self.fail("Error deleting the MySQL Database instance: {0}".format(str(e)))
|
||||
|
||||
return True
|
||||
|
||||
def get_mysqldatabase(self):
|
||||
'''
|
||||
Gets the properties of the specified MySQL Database.
|
||||
|
||||
:return: deserialized MySQL Database instance state dictionary
|
||||
'''
|
||||
self.log("Checking if the MySQL Database instance {0} is present".format(self.name))
|
||||
found = False
|
||||
try:
|
||||
response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
database_name=self.name)
|
||||
found = True
|
||||
self.log("Response : {0}".format(response))
|
||||
self.log("MySQL Database instance : {0} found".format(response.name))
|
||||
except CloudError as e:
|
||||
self.log('Did not find the MySQL Database instance.')
|
||||
if found is True:
|
||||
return response.as_dict()
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
"""Main execution"""
|
||||
AzureRMMySqlDatabase()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,209 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: azure_rm_mysqldatabase_info
|
||||
version_added: "2.9"
|
||||
short_description: Get Azure MySQL Database facts
|
||||
description:
|
||||
- Get facts of MySQL Database.
|
||||
|
||||
options:
|
||||
resource_group:
|
||||
description:
|
||||
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
|
||||
required: True
|
||||
type: str
|
||||
server_name:
|
||||
description:
|
||||
- The name of the server.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- The name of the database.
|
||||
type: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- azure
|
||||
|
||||
author:
|
||||
- Zim Kalinowski (@zikalino)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get instance of MySQL Database
|
||||
azure_rm_mysqldatabase_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: server_name
|
||||
name: database_name
|
||||
|
||||
- name: List instances of MySQL Database
|
||||
azure_rm_mysqldatabase_info:
|
||||
resource_group: myResourceGroup
|
||||
server_name: server_name
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
databases:
|
||||
description:
|
||||
- A list of dictionaries containing facts for MySQL Databases.
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
id:
|
||||
description:
|
||||
- Resource ID.
|
||||
returned: always
|
||||
type: str
|
||||
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testser
|
||||
ver/databases/db1"
|
||||
resource_group:
|
||||
description:
|
||||
- Resource group name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testrg
|
||||
server_name:
|
||||
description:
|
||||
- Server name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: testserver
|
||||
name:
|
||||
description:
|
||||
- Resource name.
|
||||
returned: always
|
||||
type: str
|
||||
sample: db1
|
||||
charset:
|
||||
description:
|
||||
- The charset of the database.
|
||||
returned: always
|
||||
type: str
|
||||
sample: utf8
|
||||
collation:
|
||||
description:
|
||||
- The collation of the database.
|
||||
returned: always
|
||||
type: str
|
||||
sample: English_United States.1252
|
||||
'''
|
||||
|
||||
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
|
||||
|
||||
try:
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from azure.mgmt.rdbms.mysql import MySQLManagementClient
|
||||
from msrest.serialization import Model
|
||||
except ImportError:
|
||||
# This is handled in azure_rm_common
|
||||
pass
|
||||
|
||||
|
||||
class AzureRMMySqlDatabaseInfo(AzureRMModuleBase):
|
||||
def __init__(self):
|
||||
# define user inputs into argument
|
||||
self.module_arg_spec = dict(
|
||||
resource_group=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
server_name=dict(
|
||||
type='str',
|
||||
required=True
|
||||
),
|
||||
name=dict(
|
||||
type='str'
|
||||
)
|
||||
)
|
||||
# store the results of the module operation
|
||||
self.results = dict(
|
||||
changed=False
|
||||
)
|
||||
self.resource_group = None
|
||||
self.server_name = None
|
||||
self.name = None
|
||||
super(AzureRMMySqlDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
|
||||
|
||||
def exec_module(self, **kwargs):
|
||||
is_old_facts = self.module._name == 'azure_rm_mysqldatabase_facts'
|
||||
if is_old_facts:
|
||||
self.module.deprecate("The 'azure_rm_mysqldatabase_facts' module has been renamed to 'azure_rm_mysqldatabase_info'", version='2.13')
|
||||
|
||||
for key in self.module_arg_spec:
|
||||
setattr(self, key, kwargs[key])
|
||||
|
||||
if (self.resource_group is not None and
|
||||
self.server_name is not None and
|
||||
self.name is not None):
|
||||
self.results['databases'] = self.get()
|
||||
elif (self.resource_group is not None and
|
||||
self.server_name is not None):
|
||||
self.results['databases'] = self.list_by_server()
|
||||
return self.results
|
||||
|
||||
def get(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mysql_client.databases.get(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name,
|
||||
database_name=self.name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
self.log('Could not get facts for Databases.')
|
||||
|
||||
if response is not None:
|
||||
results.append(self.format_item(response))
|
||||
|
||||
return results
|
||||
|
||||
def list_by_server(self):
|
||||
response = None
|
||||
results = []
|
||||
try:
|
||||
response = self.mysql_client.databases.list_by_server(resource_group_name=self.resource_group,
|
||||
server_name=self.server_name)
|
||||
self.log("Response : {0}".format(response))
|
||||
except CloudError as e:
|
||||
            self.fail("Error listing databases for server {0} - {1}".format(self.server_name, str(e)))
|
||||
|
||||
if response is not None:
|
||||
for item in response:
|
||||
results.append(self.format_item(item))
|
||||
|
||||
return results
|
||||
|
||||
def format_item(self, item):
|
||||
d = item.as_dict()
|
||||
d = {
|
||||
'resource_group': self.resource_group,
|
||||
'server_name': self.server_name,
|
||||
'name': d['name'],
|
||||
'charset': d['charset'],
|
||||
'collation': d['collation']
|
||||
}
|
||||
return d
|
||||
|
||||
|
||||
def main():
|
||||
AzureRMMySqlDatabaseInfo()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -1,277 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
---
module: azure_rm_mysqlfirewallrule
version_added: "2.8"
short_description: Manage MySQL firewall rule instance
description:
    - Create, update and delete instances of MySQL firewall rules.

options:
    resource_group:
        description:
            - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        required: True
    server_name:
        description:
            - The name of the server.
        required: True
    name:
        description:
            - The name of the MySQL firewall rule.
        required: True
    start_ip_address:
        description:
            - The start IP address of the MySQL firewall rule. Must be IPv4 format.
    end_ip_address:
        description:
            - The end IP address of the MySQL firewall rule. Must be IPv4 format.
    state:
        description:
            - Assert the state of the MySQL firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present.
        default: present
        choices:
            - absent
            - present

extends_documentation_fragment:
    - azure

author:
    - Zim Kalinowski (@zikalino)

'''

EXAMPLES = '''
- name: Create (or update) MySQL firewall rule
  azure_rm_mysqlfirewallrule:
    resource_group: myResourceGroup
    server_name: testserver
    name: rule1
    start_ip_address: 10.0.0.17
    end_ip_address: 10.0.0.20
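
# Illustrative example: the same rule can be removed by setting state to absent
# (resource names above are placeholders).
- name: Delete MySQL firewall rule
  azure_rm_mysqlfirewallrule:
    resource_group: myResourceGroup
    server_name: testserver
    name: rule1
    state: absent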
'''

RETURN = '''
id:
    description:
        - Resource ID.
    returned: always
    type: str
    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testserver/firewallRules/rule1"
'''

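# Illustrative sketch: registering the module result and reading the id value documented
# in RETURN above. The variable name fw_rule is arbitrary.
#
#     - name: Create (or update) MySQL firewall rule
#       azure_rm_mysqlfirewallrule:
#         resource_group: myResourceGroup
#         server_name: testserver
#         name: rule1
#         start_ip_address: 10.0.0.17
#         end_ip_address: 10.0.0.20
#       register: fw_rule
#
#     - name: Show the resource id of the rule
#       debug:
#         msg: "{{ fw_rule.id }}"
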
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from msrestazure.azure_exceptions import CloudError
    from msrest.polling import LROPoller
    from azure.mgmt.rdbms.mysql import MySQLManagementClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass


class Actions:
    NoAction, Create, Update, Delete = range(4)


class AzureRMMySqlFirewallRule(AzureRMModuleBase):
    """Configuration class for an Azure RM MySQL firewall rule resource"""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            start_ip_address=dict(
                type='str'
            ),
            end_ip_address=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.server_name = None
        self.name = None
        self.start_ip_address = None
        self.end_ip_address = None

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMMySqlFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                       supports_check_mode=True,
                                                       supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        old_response = None
        response = None

        # look up the resource group (fails if it does not exist)
        resource_group = self.get_resource_group(self.resource_group)

        old_response = self.get_firewallrule()
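
        # Decide what to do based on whether the rule already exists and the requested
        # state:
        #   - missing + state=present -> Create
        #   - missing + state=absent  -> nothing to do
        #   - exists  + state=absent  -> Delete
        #   - exists  + state=present -> Update, but only if a supplied
        #     start_ip_address/end_ip_address differs from the existing rule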
        if not old_response:
            self.log("MySQL firewall rule instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("MySQL firewall rule instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if MySQL firewall rule instance has to be deleted or may be updated")
                if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
                    self.to_do = Actions.Update
                if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the MySQL firewall rule instance")

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_firewallrule()

            if not old_response:
                self.results['changed'] = True
            else:
                self.results['changed'] = (old_response != response)
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("MySQL firewall rule instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_firewallrule()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should really be fixed in Azure
            while self.get_firewallrule():
                time.sleep(20)
        else:
            self.log("MySQL firewall rule instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_firewallrule(self):
        '''
        Creates or updates a MySQL firewall rule with the specified configuration.

        :return: deserialized MySQL firewall rule instance state dictionary
        '''
        self.log("Creating / Updating the MySQL firewall rule instance {0}".format(self.name))

        try:
            response = self.mysql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
                                                                         server_name=self.server_name,
                                                                         firewall_rule_name=self.name,
                                                                         start_ip_address=self.start_ip_address,
                                                                         end_ip_address=self.end_ip_address)
            if isinstance(response, LROPoller):
                # long-running operation: wait for completion and use the final resource
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the MySQL firewall rule instance.')
            self.fail("Error creating the MySQL firewall rule instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_firewallrule(self):
        '''
        Deletes the specified MySQL firewall rule instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the MySQL firewall rule instance {0}".format(self.name))
        try:
            response = self.mysql_client.firewall_rules.delete(resource_group_name=self.resource_group,
                                                               server_name=self.server_name,
                                                               firewall_rule_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the MySQL firewall rule instance.')
            self.fail("Error deleting the MySQL firewall rule instance: {0}".format(str(e)))

        return True

    def get_firewallrule(self):
        '''
        Gets the properties of the specified MySQL firewall rule.

        :return: deserialized MySQL firewall rule instance state dictionary
        '''
        self.log("Checking if the MySQL firewall rule instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mysql_client.firewall_rules.get(resource_group_name=self.resource_group,
                                                            server_name=self.server_name,
                                                            firewall_rule_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("MySQL firewall rule instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the MySQL firewall rule instance.')
        if found:
            return response.as_dict()

        return False


def main():
    """Main execution"""
    AzureRMMySqlFirewallRule()


if __name__ == '__main__':
    main()
Some files were not shown because too many files have changed in this diff