Migrated to community.general

Pull request: pull/67091/head
Author: Ansible Core Team, committed by Matt Martz
Parent: 58e8a91f4d
Commit: 4e488d8435

--- a/contrib/inventory/abiquo.ini
+++ /dev/null
@@ -1,48 +0,0 @@
# Ansible external inventory script settings for Abiquo
#
# Define an Abiquo user with access to the Abiquo API. This user will be used
# to perform the queries required to generate the Ansible inventory output.
#
[auth]
apiuser = admin
apipass = xabiquo
# Specify the Abiquo API version in major.minor format and the access URI to
# the API endpoint. Tested versions are: 2.6, 3.0 and 3.1.
# To confirm that your box has access to the Abiquo API you can perform a
# curl command, replacing with suitable values, similar to this:
# curl -X GET https://192.168.2.100/api/login -u admin:xabiquo
#
[api]
version = 3.0
uri = https://192.168.2.100/api
# You probably won't need to modify login preferences, but just in case
login_path = /login
login_type = application/vnd.abiquo.user+json
# To avoid performing excessive calls to the Abiquo API you can define a
# cache for the plugin output. Within the time defined in seconds, the latest
# output will be reused. After that time, the cache will be refreshed.
#
[cache]
cache_max_age = 30
cache_dir = /tmp
[defaults]
# Depending on your Abiquo environment, you may want to use only public IP
# addresses (if using public cloud providers) or also private IP addresses.
# You can set this with the public_ip_only option.
public_ip_only = false
# default_net_interface is only used if public_ip_only = false.
# If public_ip_only is set to false, you can choose the default NIC from
# which to obtain the IP address that defines the host.
default_net_interface = nic0
# If deployed_only is true, only deployed VMs are included in the plugin output.
deployed_only = true
# Define whether VM metadata is obtained from the Abiquo API.
get_metadata = false

--- a/contrib/inventory/abiquo.py
+++ /dev/null
@@ -1,232 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Abiquo
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to the Abiquo API.
It requires some Python libraries; make sure they are installed before using this script.
This script has been tested with Abiquo 3.0 but it may also work with Abiquo 2.6.
Before using this script you may want to modify the abiquo.ini config file.
This script generates an Ansible hosts file with these host groups:
ABQ_xxx: Defines a host by its Abiquo VM name label
all: Contains all hosts defined in the Abiquo user's enterprise
virtualdatacenter: Creates a host group for each virtualdatacenter containing all hosts defined in it
virtualappliance: Creates a host group for each virtualappliance containing all hosts defined in it
imagetemplate: Creates a host group for each image template containing all hosts using it
'''
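# As an illustrative sketch (names and addresses are made up, not from a real
# Abiquo deployment), the emitted JSON has roughly this shape:
#   {
#     "_meta": {"hostvars": {"10.60.12.7": {...}}},
#     "all": {"hosts": [], "children": ["web_vm"]},
#     "my_vdc": {"hosts": [], "children": ["web_vm"]},
#     "my_vapp": {"hosts": [], "children": ["web_vm"]},
#     "web_vm": ["10.60.12.7"]
#   }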
# (c) 2014, Daniel Beneyto <daniel.beneyto@abiquo.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import json
from ansible.module_utils.six.moves import configparser as ConfigParser
from ansible.module_utils.urls import open_url
def api_get(link, config):
try:
if link is None:
url = config.get('api', 'uri') + config.get('api', 'login_path')
headers = {"Accept": config.get('api', 'login_type')}
else:
url = link['href'] + '?limit=0'
headers = {"Accept": link['type']}
result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''),
url_password=config.get('auth', 'apipass').replace('\n', ''))
return json.loads(result.read())
except Exception:
return None
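# Illustrative usage (mirrors the __main__ block below): the first call with
# link=None performs the login request; subsequent calls follow links returned
# by the API:
#   login = api_get(None, config)
#   enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise')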
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache', 'cache_dir')
try:
cache = open('/'.join([dpath, 'inventory']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError:
    # caching is best-effort; ignore write errors
    pass
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath, 'inventory']), 'r')
inv = cache.read()
cache.close()
except IOError:
    # cache missing or unreadable; fall back to the empty inventory
    pass
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache', 'cache_dir'):
dpath = config.get('cache', 'cache_dir')
try:
existing = os.stat('/'.join([dpath, 'inventory']))
except Exception:
# cache doesn't exist or isn't accessible
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
return True
return False
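# For example, with abiquo.ini's cache_max_age = 30 the cache counts as fresh
# while (now - mtime of the inventory file) <= 30 seconds.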
def generate_inv_from_api(enterprise_entity, config):
try:
inventory['all'] = {}
inventory['all']['children'] = []
inventory['all']['hosts'] = []
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = {}
enterprise = api_get(enterprise_entity, config)
vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines')
vms = api_get(vms_entity, config)
for vmcollection in vms['collection']:
for link in vmcollection['links']:
if link['rel'] == 'virtualappliance':
vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
elif link['rel'] == 'virtualdatacenter':
vm_vdc = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
elif link['rel'] == 'virtualmachinetemplate':
vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
# From abiquo.ini: only add VMs with a public IP to the inventory
if config.getboolean('defaults', 'public_ip_only') is True:
for link in vmcollection['links']:
if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip':
vm_nic = link['title']
break
else:
vm_nic = None
# Otherwise, assign the IP address of the configured default network interface
else:
for link in vmcollection['links']:
if link['rel'] == config.get('defaults', 'default_net_interface'):
vm_nic = link['title']
break
else:
vm_nic = None
vm_state = True
# From abiquo.ini: only add deployed VMs to the inventory
if config.getboolean('defaults', 'deployed_only') is True and vmcollection['state'] == 'NOT_ALLOCATED':
vm_state = False
if vm_nic is not None and vm_state:
if vm_vapp not in inventory:
inventory[vm_vapp] = {}
inventory[vm_vapp]['children'] = []
inventory[vm_vapp]['hosts'] = []
if vm_vdc not in inventory:
inventory[vm_vdc] = {}
inventory[vm_vdc]['hosts'] = []
inventory[vm_vdc]['children'] = []
if vm_template not in inventory:
inventory[vm_template] = {}
inventory[vm_template]['children'] = []
inventory[vm_template]['hosts'] = []
if config.getboolean('defaults', 'get_metadata') is True:
meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata')
try:
metadata = api_get(meta_entity, config)
if (config.getfloat("api", "version") >= 3.0):
vm_metadata = metadata['metadata']
else:
vm_metadata = metadata['metadata']['metadata']
inventory['_meta']['hostvars'][vm_nic] = vm_metadata
except Exception as e:
pass
inventory[vm_vapp]['children'].append(vmcollection['name'])
inventory[vm_vdc]['children'].append(vmcollection['name'])
inventory[vm_template]['children'].append(vmcollection['name'])
inventory['all']['children'].append(vmcollection['name'])
inventory[vmcollection['name']] = []
inventory[vmcollection['name']].append(vm_nic)
return inventory
except Exception as e:
# Return empty hosts output
return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
def get_inventory(enterprise, config):
''' Reads the inventory from cache or Abiquo api '''
if cache_available(config):
inv = get_cache('inventory', config)
else:
default_group = os.path.splitext(os.path.basename(sys.argv[0]))[0]  # note: str.rstrip strips characters, not a suffix
# MAKE ABIQUO API CALLS #
inv = generate_inv_from_api(enterprise, config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
enterprise = {}
# Read config
config = ConfigParser.SafeConfigParser()
for configfilename in [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'abiquo.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
try:
login = api_get(None, config)
enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise')
except Exception as e:
enterprise = None
if cache_available(config):
inventory = get_cache('inventory', config)
else:
inventory = get_inventory(enterprise, config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()
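# Usage sketch (illustrative): the script can be passed directly to ansible as
# a dynamic inventory source:
#   ansible -i abiquo.py all -m ping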

--- a/contrib/inventory/apache-libcloud.py
+++ /dev/null
@@ -1,346 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Sebastien Goasguen <runseb@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Apache Libcloud generic external inventory script
=================================================
Generates inventory that Ansible can understand by making API requests to
cloud providers using the Apache Libcloud library.
This script also assumes there is a libcloud.ini file alongside it
'''
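# read_settings() below expects a libcloud.ini shaped roughly like this
# (illustrative placeholders, not working credentials):
#   [driver]
#   provider = EC2
#   key = <your-api-key>
#   secret = <your-api-secret>
#
#   [cache]
#   cache_path = /tmp
#   cache_max_age = 300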
import sys
import os
import argparse
import re
from time import time
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils.six.moves import configparser as ConfigParser
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security as sec
import json
class LibcloudInventory(object):
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = {}
# Index of hostname (address) to instance ID
self.index = {}
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if len(self.inventory) == 0:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines whether the cache files are still valid or have expired '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the libcloud.ini file '''
config = ConfigParser.SafeConfigParser()
libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini')
libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path)
config.read(libcloud_ini_path)
if not config.has_section('driver'):
raise ValueError('libcloud.ini file must contain a [driver] section')
if config.has_option('driver', 'provider'):
self.provider = config.get('driver', 'provider')
else:
raise ValueError('libcloud.ini does not have a provider defined')
if config.has_option('driver', 'key'):
self.key = config.get('driver', 'key')
else:
raise ValueError('libcloud.ini does not have a key defined')
if config.has_option('driver', 'secret'):
self.secret = config.get('driver', 'secret')
else:
raise ValueError('libcloud.ini does not have a secret defined')
if config.has_option('driver', 'host'):
self.host = config.get('driver', 'host')
if config.has_option('driver', 'secure'):
self.secure = config.get('driver', 'secure')
if config.has_option('driver', 'verify_ssl_cert'):
self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert')
if config.has_option('driver', 'port'):
self.port = config.get('driver', 'port')
if config.has_option('driver', 'path'):
self.path = config.get('driver', 'path')
if config.has_option('driver', 'api_version'):
self.api_version = config.get('driver', 'api_version')
Driver = get_driver(getattr(Provider, self.provider))
self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure,
host=self.host, path=self.path)
# Cache related
cache_path = config.get('cache', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
self.cache_path_index = cache_path + "/ansible-libcloud.index"
self.cache_max_age = config.getint('cache', 'cache_max_age')
def parse_cli_args(self):
'''
Command line argument processing
'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
'''
Do API calls to a location, and save data in cache files
'''
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_nodes(self):
'''
Gets the list of all nodes
'''
for node in self.conn.list_nodes():
self.add_node(node)
def get_node(self, node_id):
'''
Gets details about a specific node
'''
return [node for node in self.conn.list_nodes() if node.id == node_id][0]
def add_node(self, node):
'''
Adds a node to the inventory and index, as long as it is
addressable
'''
# Only want running instances
if node.state != 0:
return
# Select the best destination address
dest = node.public_ips[0] if node.public_ips else None
if not dest:
    # Skip instances we cannot address (e.g. in a private VPC subnet)
    return
# Add to index
self.index[dest] = node.name
# Inventory: Group by instance ID (always a group of 1)
self.inventory[node.name] = [dest]
'''
# Inventory: Group by region
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, node.placement, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
'''
# Inventory: Group by key pair
if node.extra['key_name']:
self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
# Inventory: Group by security group, quick thing to handle single sg
if node.extra['security_group']:
self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)
# Inventory: Group by tag
if node.extra['tags']:
for tagkey in node.extra['tags'].keys():
self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)
def get_host_info(self):
'''
Get variables about a specific host
'''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
node_id = self.index[self.args.host]
node = self.get_node(node_id)
instance_vars = {}
for key, value in vars(node).items():
key = self.to_safe('ec2_' + key)
# Handle complex types
if isinstance(value, (int, bool)):
instance_vars[key] = value
elif isinstance(value, string_types):
instance_vars[key] = value.strip()
elif value is None:
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2_tags':
for k, v in iteritems(value):
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
instance_vars["ec2_security_group_names"] = ','.join(group_names)
else:
pass
# TODO Product codes if someone finds them useful
# print(key)
# print(type(value))
# print(value)
return self.json_format_dict(instance_vars, True)
def push(self, my_dict, key, element):
'''
Pushes an element onto a list that may not have been defined in
the dict
'''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
'''
Reads the inventory from the cache file and returns it as a JSON
object
'''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
'''
Reads the index from the cache file and sets self.index
'''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
'''
Writes data in JSON format to a file
'''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
'''
Converts 'bad' characters in a string to underscores so they can be
used as Ansible groups
'''
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
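# For example (illustrative): to_safe('sg_web servers:prod') -> 'sg_web_servers_prod'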
def json_format_dict(self, data, pretty=False):
'''
Converts a dict to a JSON object and dumps it as a formatted
string
'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def main():
LibcloudInventory()
if __name__ == '__main__':
main()
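# Usage sketch (illustrative):
#   ./apache-libcloud.py --list            # full inventory, cached per libcloud.ini
#   ./apache-libcloud.py --host 10.0.0.4   # variables for one address
#   ./apache-libcloud.py --refresh-cache   # force fresh API calls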

--- a/contrib/inventory/apstra_aos.ini
+++ /dev/null
@@ -1,20 +0,0 @@
# Ansible Apstra AOS external inventory script settings
# Dynamic inventory script parameters can be provided using this file
# Or by using Environment Variables:
# - AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
#
# This file takes precedence over the Environment Variables
#
[aos]
# aos_server = 172.20.62.3
# port = 8888
# username = admin
# password = admin
## Blueprint Mode
# To use the inventory in Blueprint mode, you need to define the name of the blueprint you want to use
# blueprint = my-blueprint-l2
# blueprint_interface = true
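# Equivalently, the same parameters can come from the environment (see the
# list above), e.g.:
#   export AOS_SERVER=172.20.62.3
#   export AOS_BLUEPRINT=my-blueprint-l2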

--- a/contrib/inventory/apstra_aos.py
+++ /dev/null
@@ -1,589 +0,0 @@
#!/usr/bin/env python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Apstra AOS external inventory script
====================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this:
- copy this file over /etc/ansible/hosts and chmod +x the file, or
- copy both files (.py and .ini) into your preferred directory
More information about Ansible Dynamic Inventory here
http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
Two modes are currently supported: **device based** or **blueprint based**:
- For **device based**, the list of devices is taken from the global device list
and the serial ID will be used as the inventory_hostname
- For **blueprint based**, the list of devices is taken from the given blueprint
and the node name will be used as the inventory_hostname
Input parameters can be provided either with the ini file or by using environment variables:
The following environment variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
The config file takes precedence over the environment variables
Tested with Apstra AOS 1.1
This script has been inspired by the cobbler.py inventory script. Thanks!
Author: Damien Garros (@dgarros)
Version: 0.2.0
"""
import json
import os
import re
import sys
try:
import argparse
HAS_ARGPARSE = True
except ImportError:
HAS_ARGPARSE = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils.six.moves import configparser
"""
##
Expected output format in Device mode
{
"Cumulus": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"EOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
},
"Generic Model": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"Ubuntu GNU/Linux": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"VX": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"_meta": {
"hostvars": {
"5254001CAFD8": {
"agent_start_time": "2017-02-03T00:49:16.000000Z",
"ansible_ssh_host": "172.20.52.6",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:58.454480Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.6",
"mgmt_macaddr": "52:54:00:1C:AF:D8",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "5254001CAFD8",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"52540022211A": {
"agent_start_time": "2017-02-03T00:45:22.000000Z",
"ansible_ssh_host": "172.20.52.7",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.019189Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.7",
"mgmt_macaddr": "52:54:00:22:21:1a",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540022211A",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"52540073956E": {
"agent_start_time": "2017-02-03T00:45:19.000000Z",
"ansible_ssh_host": "172.20.52.8",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.030113Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.8",
"mgmt_macaddr": "52:54:00:73:95:6e",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540073956E",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"525400DDDF72": {
"agent_start_time": "2017-02-03T00:49:07.000000Z",
"ansible_ssh_host": "172.20.52.5",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:46.929921Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.5",
"mgmt_macaddr": "52:54:00:DD:DF:72",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "525400DDDF72",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"525400E5486D": {
"agent_start_time": "2017-02-02T18:44:42.000000Z",
"ansible_ssh_host": "172.20.52.4",
"aos_hcl_model": "Generic_Server_1RU_1x10G",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-02T21:11:25.188734Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "Generic Model",
"hw_version": "pc-i440fx-trusty",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.4",
"mgmt_macaddr": "52:54:00:e5:48:6d",
"os_arch": "x86_64",
"os_family": "Ubuntu GNU/Linux",
"os_version": "14.04 LTS",
"os_version_info": {
"build": "",
"major": "14",
"minor": "04"
},
"serial_number": "525400E5486D",
"state": "OOS-QUARANTINED",
"vendor": "Generic Manufacturer"
}
}
},
"all": {
"hosts": [
"5254001CAFD8",
"52540073956E",
"525400DDDF72",
"525400E5486D",
"52540022211A"
],
"vars": {}
},
"vEOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
}
}
"""
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
class AosInventory(object):
def __init__(self):
""" Main execution path """
if not HAS_AOS_PYEZ:
raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
if not HAS_ARGPARSE:
raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7')
# Initialize inventory
self.inventory = dict() # A list of groups and the hosts in that group
self.inventory['_meta'] = dict()
self.inventory['_meta']['hostvars'] = dict()
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# ----------------------------------------------------
# Open session to AOS
# ----------------------------------------------------
aos = Session(server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos.login()
# Save session information in variables of group all
self.add_var_to_group('all', 'aos_session', aos.session)
# Add the AOS server itself in the inventory
self.add_host_to_group("all", 'aos')
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
# ----------------------------------------------------
# Build the inventory
# 2 modes are supported: device based or blueprint based
# - For device based, the list of devices is taken from the global device list
#   and the serial ID will be used as the inventory_hostname
# - For blueprint based, the list of devices is taken from the given blueprint
#   and the node name will be used as the inventory_hostname
# ----------------------------------------------------
if self.aos_blueprint:
bp = aos.Blueprints[self.aos_blueprint]
if bp.exists is False:
fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
for dev_name, dev_id in bp.params['devices'].value.items():
self.add_host_to_group('all', dev_name)
device = aos.Devices.find(uid=dev_id)
if 'facts' in device.value.keys():
self.add_device_facts_to_var(dev_name, device)
# Define admin State and Status
if 'user_config' in device.value.keys():
if 'admin_state' in device.value['user_config'].keys():
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
self.add_device_status_to_var(dev_name, device)
# Go over the contents data structure
for node in bp.contents['system']['nodes']:
if node['display_name'] == dev_name:
self.add_host_to_group(node['role'], dev_name)
# Check for additional attribute to import
attributes_to_import = [
'loopback_ip',
'asn',
'role',
'position',
]
for attr in attributes_to_import:
if attr in node.keys():
self.add_var_to_host(dev_name, attr, node[attr])
# if blueprint_interface is enabled in the configuration
# Collect links information
if self.aos_blueprint_int:
interfaces = dict()
for link in bp.contents['system']['links']:
# each link has 2 sides [0, 1], and it's unknown which one matches this device;
# at first we assume the first side (0) matches and the peer is (1)
peer_id = 1
for side in link['endpoints']:
if side['display_name'] == dev_name:
# import local information first
int_name = side['interface']
# init dict
interfaces[int_name] = dict()
if 'ip' in side.keys():
interfaces[int_name]['ip'] = side['ip']
if 'interface' in side.keys():
interfaces[int_name]['name'] = side['interface']
if 'display_name' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
if 'ip' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
if 'type' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
else:
# if we didn't match the first time, prepare the peer_id
# for the second loop iteration
peer_id = 0
self.add_var_to_host(dev_name, 'interfaces', interfaces)
else:
for device in aos.Devices:
# If not reachable, create by key;
# if reachable, create by hostname
self.add_host_to_group('all', device.name)
# populate information for this host
self.add_device_status_to_var(device.name, device)
if 'user_config' in device.value.keys():
for key, value in device.value['user_config'].items():
self.add_var_to_host(device.name, key, value)
# Based on device status online|offline, collect facts as well
if device.value['status']['comm_state'] == 'on':
if 'facts' in device.value.keys():
self.add_device_facts_to_var(device.name, device)
# Check if the device is associated with a blueprint;
# if it is, create a new group
if 'blueprint_active' in device.value['status'].keys():
if 'blueprint_id' in device.value['status'].keys():
bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
if bp:
self.add_host_to_group(bp.name, device.name)
# ----------------------------------------------------
# Convert the inventory and return a JSON String
# ----------------------------------------------------
data_to_print = ""
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def read_settings(self):
""" Reads the settings from the apstra_aos.ini file """
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
# Default Values
self.aos_blueprint = False
self.aos_blueprint_int = True
self.aos_username = 'admin'
self.aos_password = 'admin'
self.aos_server_port = 8888
# Try to reach all parameters from File, if not available try from ENV
try:
self.aos_server = config.get('aos', 'aos_server')
except Exception:
if 'AOS_SERVER' in os.environ.keys():
self.aos_server = os.environ['AOS_SERVER']
try:
self.aos_server_port = config.get('aos', 'port')
except Exception:
if 'AOS_PORT' in os.environ.keys():
self.aos_server_port = os.environ['AOS_PORT']
try:
self.aos_username = config.get('aos', 'username')
except Exception:
if 'AOS_USERNAME' in os.environ.keys():
self.aos_username = os.environ['AOS_USERNAME']
try:
self.aos_password = config.get('aos', 'password')
except Exception:
if 'AOS_PASSWORD' in os.environ.keys():
self.aos_password = os.environ['AOS_PASSWORD']
try:
self.aos_blueprint = config.get('aos', 'blueprint')
except Exception:
if 'AOS_BLUEPRINT' in os.environ.keys():
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
try:
if config.get('aos', 'blueprint_interface') in ['false', 'no']:
self.aos_blueprint_int = False
except Exception:
pass
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def add_host_to_group(self, group, host):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['hosts'].append(host)
def add_var_to_host(self, host, var, value):
# Check if the host exist, if not initialize it
if host not in self.inventory['_meta']['hostvars'].keys():
self.inventory['_meta']['hostvars'][host] = {}
self.inventory['_meta']['hostvars'][host][var] = value
def add_var_to_group(self, group, var, value):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['vars'][var] = value
def add_device_facts_to_var(self, device_name, device):
# Populate variables for this host
self.add_var_to_host(device_name,
'ansible_ssh_host',
device.value['facts']['mgmt_ipaddr'])
self.add_var_to_host(device_name, 'id', device.id)
# self.add_host_to_group('all', device.name)
for key, value in device.value['facts'].items():
self.add_var_to_host(device_name, key, value)
if key == 'os_family':
self.add_host_to_group(value, device_name)
elif key == 'hw_model':
self.add_host_to_group(value, device_name)
def cleanup_group_name(self, group_name):
"""
Clean up group name by:
- replacing all non-alphanumeric characters with underscores
- converting to lowercase
"""
rx = re.compile(r'\W+')
clean_group = rx.sub('_', group_name).lower()
return clean_group
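# For example: cleanup_group_name('Ubuntu GNU/Linux') -> 'ubuntu_gnu_linux'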
def add_device_status_to_var(self, device_name, device):
if 'status' in device.value.keys():
for key, value in device.value['status'].items():
self.add_var_to_host(device_name, key, value)
# Run the script
if __name__ == '__main__':
AosInventory()
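# Usage sketch (illustrative): running the script prints the full inventory,
# in device or blueprint mode depending on apstra_aos.ini:
#   ./apstra_aos.py --list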

--- a/contrib/inventory/azure_rm.ini
+++ /dev/null
@@ -1,23 +0,0 @@
#
# Configuration file for azure_rm.py
#
[azure]
# Control which resource groups are included. By default all resource groups are included.
# Set resource_groups to a comma separated list of resource groups names.
#resource_groups=
# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
#tags=
# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
#locations=
# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
include_powerstate=yes
# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
group_by_resource_group=yes
group_by_location=yes
group_by_security_group=yes
group_by_os_family=yes
group_by_tag=yes
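# Each setting above also has an environment variable equivalent (see the
# script docstring), e.g.:
#   export AZURE_INCLUDE_POWERSTATE=no
#   export AZURE_GROUP_BY_TAG=yes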

--- a/contrib/inventory/azure_rm.py
+++ /dev/null
@@ -1,973 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
'''
Important note (2018/10)
========================
This inventory script is in maintenance mode: only critical bug fixes but no new features.
There's a new Azure external inventory script at https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/azure_rm.py,
with better performance and the latest features. Please go to that link to get the latest Azure inventory script.
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instruction on installing the
Azure Python SDK see https://azure-sdk-for-python.readthedocs.io/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
- cloud_environment
- adfs_authority_url
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
- AZURE_CLOUD_ENVIRONMENT
- AZURE_ADFS_AUTHORITY_URL
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-00-5E-00-53-FE",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Select hosts for specific locations:
AZURE_LOCATIONS=eastus,westus,eastus2
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_INI_PATH environment variable:
export AZURE_INI_PATH=/path/to/custom.ini
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
import argparse
import json
import os
import re
import sys
import inspect
from os.path import expanduser
from ansible.module_utils.six.moves import configparser as cp
import ansible.module_utils.six.moves.urllib.parse as urlparse
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
CLIError = None
try:
from msrestazure.azure_active_directory import AADTokenCredentials
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_active_directory import MSIAuthentication
from msrestazure import azure_cloud
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.subscriptions import SubscriptionClient
from azure.mgmt.compute import ComputeManagementClient
from adal.authentication_context import AuthenticationContext
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
try:
from azure.cli.core.util import CLIError
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
from azure.common.cloud import get_cli_active_cloud
except ImportError:
HAS_AZURE_CLI_CORE = False
CLIError = Exception
try:
from ansible.release import __version__ as ansible_version
except ImportError:
ansible_version = 'unknown'
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD',
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
)
AZURE_CONFIG_SETTINGS = dict(
resource_groups='AZURE_RESOURCE_GROUPS',
tags='AZURE_TAGS',
locations='AZURE_LOCATIONS',
include_powerstate='AZURE_INCLUDE_POWERSTATE',
group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
group_by_location='AZURE_GROUP_BY_LOCATION',
group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
group_by_tag='AZURE_GROUP_BY_TAG',
group_by_os_family='AZURE_GROUP_BY_OS_FAMILY',
use_private_ip='AZURE_USE_PRIVATE_IP'
)
AZURE_MIN_VERSION = "2.0.0"
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
def azure_id_to_dict(id):
pieces = re.sub(r'^\/', '', id).split('/')
result = {}
index = 0
while index < len(pieces) - 1:
result[pieces[index]] = pieces[index + 1]
index += 1
return result
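# Illustrative: for the VM id shown in the docstring above, the result maps
# each path segment to the segment that follows it, so for example
#   azure_id_to_dict(vm_id)['resourceGroups'] == 'galaxy-production'
#   azure_id_to_dict(vm_id)['virtualMachines'] == 'object-name'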
class AzureRM(object):
def __init__(self, args):
self._args = args
self._cloud_environment = None
self._compute_client = None
self._resource_client = None
self._network_client = None
self._adfs_authority_url = None
self._resource = None
self.debug = False
if args.debug:
self.debug = True
self.credentials = self._get_credentials(args)
if not self.credentials:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"or define a profile in ~/.azure/credentials.")
# if cloud_environment specified, look up/build Cloud object
raw_cloud_env = self.credentials.get('cloud_environment')
if not raw_cloud_env:
self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
else:
# try to look up "well-known" values via the name attribute on azure_cloud members
all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
if len(matched_clouds) == 1:
self._cloud_environment = matched_clouds[0]
elif len(matched_clouds) > 1:
self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
else:
if not urlparse.urlparse(raw_cloud_env).scheme:
self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
try:
self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
except Exception as e:
self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message))
if self.credentials.get('subscription_id', None) is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
# get authentication authority
# for adfs, user could pass in authority or not.
# for others, use default authority from cloud environment
if self.credentials.get('adfs_authority_url'):
self._adfs_authority_url = self.credentials.get('adfs_authority_url')
else:
self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
# get resource from cloud environment
self._resource = self._cloud_environment.endpoints.active_directory_resource_id
if self.credentials.get('credentials'):
self.azure_credentials = self.credentials.get('credentials')
elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'],
cloud_environment=self._cloud_environment)
elif self.credentials.get('ad_user') is not None and \
self.credentials.get('password') is not None and \
self.credentials.get('client_id') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = self.acquire_token_with_username_password(
self._adfs_authority_url,
self._resource,
self.credentials['ad_user'],
self.credentials['password'],
self.credentials['client_id'],
self.credentials['tenant'])
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
tenant = self.credentials.get('tenant')
if not tenant:
tenant = 'common'
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
self.credentials['password'],
tenant=tenant,
cloud_environment=self._cloud_environment)
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password, or "
"ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
"be logged in using AzureCLI.")
def log(self, msg):
if self.debug:
print(msg + u'\n')
def fail(self, msg):
raise Exception(msg)
def _get_profile(self, profile="default"):
path = expanduser("~")
path += "/.azure/credentials"
try:
config = cp.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except Exception:
pass
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
return env_credentials
return None
def _get_azure_cli_credentials(self):
credentials, subscription_id = get_azure_cli_credentials()
cloud_environment = get_cli_active_cloud()
cli_credentials = {
'credentials': credentials,
'subscription_id': subscription_id,
'cloud_environment': cloud_environment
}
return cli_credentials
def _get_msi_credentials(self, subscription_id_param=None):
credentials = MSIAuthentication()
subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
try:
# try to get the subscription in MSI to test whether MSI is enabled
subscription_client = SubscriptionClient(credentials)
subscription = next(subscription_client.subscriptions.list())
subscription_id = str(subscription.subscription_id)
return {
'credentials': credentials,
'subscription_id': subscription_id_param or subscription_id
}
except Exception as exc:
return None
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = getattr(params, attribute)
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['client_id'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
if arg_credentials['ad_user'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
# try default profile from ~/.azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
if msi_credentials:
self.log('Retrieved credentials from MSI.')
return msi_credentials
try:
if HAS_AZURE_CLI_CORE:
self.log('Retrieving credentials from AzureCLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as ce:
self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
return None
def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
authority_uri = authority
if tenant is not None:
authority_uri = authority + '/' + tenant
context = AuthenticationContext(authority_uri)
token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
return AADTokenCredentials(token_response)
def _register(self, key):
try:
# We have to perform the one-time registration here. Otherwise, we receive an error the first
# time we attempt to use the requested client.
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception as exc:
self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
self.log("You might need to register {0} using an admin account".format(key))
self.log(("To register a provider using the Python CLI: "
"https://docs.microsoft.com/azure/azure-resource-manager/"
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
def get_mgmt_svc_client(self, client_type, base_url, api_version):
client = client_type(self.azure_credentials,
self.subscription_id,
base_url=base_url,
api_version=api_version)
client.config.add_user_agent(ANSIBLE_USER_AGENT)
return client
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-06-01')
self._register('Microsoft.Network')
return self._network_client
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-05-10')
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-03-30')
self._register('Microsoft.Compute')
return self._compute_client
class AzureInventory(object):
def __init__(self):
self._args = self._parse_cli_args()
try:
rm = AzureRM(self._args)
except Exception as e:
sys.exit("{0}".format(str(e)))
self._compute_client = rm.compute_client
self._network_client = rm.network_client
self._resource_client = rm.rm_client
self._security_groups = None
self.resource_groups = []
self.tags = None
self.locations = None
self.replace_dash_in_groups = False
self.group_by_resource_group = True
self.group_by_location = True
self.group_by_os_family = True
self.group_by_security_group = True
self.group_by_tag = True
self.include_powerstate = True
self.use_private_ip = False
self._inventory = dict(
_meta=dict(
hostvars=dict()
),
azure=[]
)
self._get_settings()
if self._args.resource_groups:
self.resource_groups = self._args.resource_groups.split(',')
if self._args.tags:
self.tags = self._args.tags.split(',')
if self._args.locations:
self.locations = self._args.locations.split(',')
if self._args.no_powerstate:
self.include_powerstate = False
self.get_inventory()
print(self._json_format_dict(pretty=self._args.pretty))
sys.exit(0)
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output (default: False)')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
help='Azure Subscription Id')
parser.add_argument('--client_id', action='store',
help='Azure Client Id ')
parser.add_argument('--secret', action='store',
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad_user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--adfs_authority_url', action='store',
help='Azure ADFS authority url')
parser.add_argument('--cloud_environment', action='store',
help='Azure Cloud Environment name or metadata discovery URL')
parser.add_argument('--resource-groups', action='store',
help='Return inventory for comma separated list of resource group names')
parser.add_argument('--tags', action='store',
help='Return inventory for comma separated list of tag key:value pairs')
parser.add_argument('--locations', action='store',
help='Return inventory for comma separated list of locations')
parser.add_argument('--no-powerstate', action='store_true', default=False,
help='Do not include the power state of each virtual host')
return parser.parse_args()
def get_inventory(self):
if len(self.resource_groups) > 0:
# get VMs for requested resource groups
for resource_group in self.resource_groups:
try:
virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower())
except Exception as exc:
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc)))
if self._args.host or self.tags:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
else:
# get all VMs within the subscription
try:
virtual_machines = self._compute_client.virtual_machines.list_all()
except Exception as exc:
sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
if self._args.host or self.tags or self.locations:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
def _load_machines(self, machines):
for machine in machines:
id_dict = azure_id_to_dict(machine.id)
# TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# fixed, we should remove the .lower(). Opened Issue
# #574: https://github.com/Azure/azure-sdk-for-python/issues/574
resource_group = id_dict['resourceGroups'].lower()
if self.group_by_security_group:
self._get_security_groups(resource_group)
host_vars = dict(
ansible_host=None,
private_ip=None,
private_ip_alloc_method=None,
public_ip=None,
public_ip_name=None,
public_ip_id=None,
public_ip_alloc_method=None,
fqdn=None,
location=machine.location,
name=machine.name,
type=machine.type,
id=machine.id,
tags=machine.tags,
network_interface_id=None,
network_interface=None,
resource_group=resource_group,
mac_address=None,
plan=(machine.plan.name if machine.plan else None),
virtual_machine_size=machine.hardware_profile.vm_size,
computer_name=(machine.os_profile.computer_name if machine.os_profile else None),
provisioning_state=machine.provisioning_state,
)
host_vars['os_disk'] = dict(
name=machine.storage_profile.os_disk.name,
operating_system_type=machine.storage_profile.os_disk.os_type.value.lower()
)
if self.include_powerstate:
host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)
if machine.storage_profile.image_reference:
host_vars['image'] = dict(
offer=machine.storage_profile.image_reference.offer,
publisher=machine.storage_profile.image_reference.publisher,
sku=machine.storage_profile.image_reference.sku,
version=machine.storage_profile.image_reference.version
)
# Add windows details
if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
host_vars['ansible_connection'] = 'winrm'
host_vars['windows_auto_updates_enabled'] = \
machine.os_profile.windows_configuration.enable_automatic_updates
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
host_vars['windows_rm'] = None
if machine.os_profile.windows_configuration.win_rm is not None:
host_vars['windows_rm'] = dict(listeners=None)
if machine.os_profile.windows_configuration.win_rm.listeners is not None:
host_vars['windows_rm']['listeners'] = []
for listener in machine.os_profile.windows_configuration.win_rm.listeners:
host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol.name,
certificate_url=listener.certificate_url))
for interface in machine.network_profile.network_interfaces:
interface_reference = self._parse_ref_id(interface.id)
network_interface = self._network_client.network_interfaces.get(
interface_reference['resourceGroups'],
interface_reference['networkInterfaces'])
if network_interface.primary:
if self.group_by_security_group and \
self._security_groups[resource_group].get(network_interface.id, None):
host_vars['security_group'] = \
self._security_groups[resource_group][network_interface.id]['name']
host_vars['security_group_id'] = \
self._security_groups[resource_group][network_interface.id]['id']
host_vars['network_interface'] = network_interface.name
host_vars['network_interface_id'] = network_interface.id
host_vars['mac_address'] = network_interface.mac_address
for ip_config in network_interface.ip_configurations:
host_vars['private_ip'] = ip_config.private_ip_address
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
if self.use_private_ip:
host_vars['ansible_host'] = ip_config.private_ip_address
if ip_config.public_ip_address:
public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
public_ip_address = self._network_client.public_ip_addresses.get(
public_ip_reference['resourceGroups'],
public_ip_reference['publicIPAddresses'])
if not self.use_private_ip:
host_vars['ansible_host'] = public_ip_address.ip_address
host_vars['public_ip'] = public_ip_address.ip_address
host_vars['public_ip_name'] = public_ip_address.name
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
host_vars['public_ip_id'] = public_ip_address.id
if public_ip_address.dns_settings:
host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
self._add_host(host_vars)
def _selected_machines(self, virtual_machines):
selected_machines = []
for machine in virtual_machines:
if self._args.host and self._args.host == machine.name:
selected_machines.append(machine)
if self.tags and self._tags_match(machine.tags, self.tags):
selected_machines.append(machine)
if self.locations and machine.location in self.locations:
selected_machines.append(machine)
return selected_machines
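# Note: the three filters above are applied independently, so a machine matching
# more than one criterion (e.g. both --host and a requested tag) is appended once
# per match rather than once overall.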
def _get_security_groups(self, resource_group):
''' For a given resource_group build a mapping of network_interface.id to security_group name '''
if not self._security_groups:
self._security_groups = dict()
if not self._security_groups.get(resource_group):
self._security_groups[resource_group] = dict()
for group in self._network_client.network_security_groups.list(resource_group):
if group.network_interfaces:
for interface in group.network_interfaces:
self._security_groups[resource_group][interface.id] = dict(
name=group.name,
id=group.id
)
def _get_powerstate(self, resource_group, name):
try:
vm = self._compute_client.virtual_machines.get(resource_group,
name,
expand='instanceview')
except Exception as exc:
sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
return next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
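# Example: an instance view typically carries status codes such as
# 'ProvisioningState/succeeded' and 'PowerState/running'; the generator above
# strips the 'PowerState/' prefix and yields 'running'.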
def _add_host(self, vars):
host_name = self._to_safe(vars['name'])
resource_group = self._to_safe(vars['resource_group'])
operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower())
security_group = None
if vars.get('security_group'):
security_group = self._to_safe(vars['security_group'])
if self.group_by_os_family:
if not self._inventory.get(operating_system_type):
self._inventory[operating_system_type] = []
self._inventory[operating_system_type].append(host_name)
if self.group_by_resource_group:
if not self._inventory.get(resource_group):
self._inventory[resource_group] = []
self._inventory[resource_group].append(host_name)
if self.group_by_location:
if not self._inventory.get(vars['location']):
self._inventory[vars['location']] = []
self._inventory[vars['location']].append(host_name)
if self.group_by_security_group and security_group:
if not self._inventory.get(security_group):
self._inventory[security_group] = []
self._inventory[security_group].append(host_name)
self._inventory['_meta']['hostvars'][host_name] = vars
self._inventory['azure'].append(host_name)
if self.group_by_tag and vars.get('tags'):
for key, value in vars['tags'].items():
safe_key = self._to_safe(key)
safe_value = safe_key + '_' + self._to_safe(value)
if not self._inventory.get(safe_key):
self._inventory[safe_key] = []
if not self._inventory.get(safe_value):
self._inventory[safe_value] = []
self._inventory[safe_key].append(host_name)
self._inventory[safe_value].append(host_name)
def _json_format_dict(self, pretty=False):
# convert inventory to json
if pretty:
return json.dumps(self._inventory, sort_keys=True, indent=2)
else:
return json.dumps(self._inventory)
def _get_settings(self):
# Load settings from the .ini, if it exists. Otherwise,
# look for environment values.
file_settings = self._load_settings()
if file_settings:
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
values = file_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif file_settings.get(key):
val = self._to_boolean(file_settings[key])
setattr(self, key, val)
else:
env_settings = self._get_env_settings()
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key):
values = env_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif env_settings.get(key, None) is not None:
val = self._to_boolean(env_settings[key])
setattr(self, key, val)
def _parse_ref_id(self, reference):
response = {}
keys = reference.strip('/').split('/')
for index in range(len(keys)):
if index < len(keys) - 1 and index % 2 == 0:
response[keys[index]] = keys[index + 1]
return response
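# Example with a hypothetical resource ID: parsing
# '/subscriptions/xxxx/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic01'
# pairs each even-indexed path segment with the segment that follows it, giving
# {'subscriptions': 'xxxx', 'resourceGroups': 'Testing', 'providers': 'Microsoft.Network',
# 'networkInterfaces': 'nic01'}.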
def _to_boolean(self, value):
if value in ['Yes', 'yes', 1, 'True', 'true', True]:
result = True
elif value in ['No', 'no', 0, 'False', 'false', False]:
result = False
else:
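# Note: any unrecognized value falls back to True.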
result = True
return result
def _get_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
config = None
settings = None
try:
config = cp.ConfigParser()
config.read(path)
except Exception:
pass
if config is not None:
settings = dict()
for key in AZURE_CONFIG_SETTINGS:
try:
settings[key] = config.get('azure', key, raw=True)
except Exception:
pass
return settings
def _tags_match(self, tag_obj, tag_args):
'''
Return True if the tags object from a VM contains the requested tag values.
:param tag_obj: Dictionary of string:string pairs
:param tag_args: List of strings in the form key=value
:return: boolean
'''
if not tag_obj:
return False
matches = 0
for arg in tag_args:
arg_key = arg
arg_value = None
if re.search(r':', arg):
arg_key, arg_value = arg.split(':')
if arg_value and tag_obj.get(arg_key, None) == arg_value:
matches += 1
elif not arg_value and tag_obj.get(arg_key, None) is not None:
matches += 1
if matches == len(tag_args):
return True
return False
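# Example: tag_args of ['env:prod', 'role'] matches a VM tagged
# {'env': 'prod', 'role': 'web'}; 'env' must carry the exact value, while the
# bare 'role' only requires the key to be present.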
def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += r"\-"
return re.sub(regex + "]", "_", word)
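# Example: 'my-group.01' becomes 'my-group_01' by default, or 'my_group_01'
# when replace_dash_in_groups is enabled.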
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC))
AzureInventory()
if __name__ == '__main__':
main()

@ -1,39 +0,0 @@
# Copyright 2016 Doalitic.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# The Brook.io inventory script has the following dependencies:
# 1. A working Brook.io account
# See https://brook.io
# 2. A valid token generated through the 'API token' panel of Brook.io
# 3. The libbrook python library.
# See https://github.com/doalitic/libbrook
#
# Author: Francisco Ros <fjros@doalitic.com>
[brook]
# Valid API token (required).
# E.g. 'Aed342a12A60433697281FeEe1a4037C'
#
api_token =
# Project id within Brook.io, as obtained from the project settings (optional). If provided,
# the generated inventory will only include the hosts that belong to that project. Otherwise,
# it will include all hosts in projects the requesting user has access to. The response
# includes groups 'project_x', where 'x' is the project name.
# E.g. '2e8e099e1bc34cc0979d97ac34e9577b'
#
project_id =

@ -1,258 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Doalitic.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Brook.io external inventory script
==================================
Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook
library, which must therefore be installed on the system to run this script.
The default configuration file is named 'brook.ini' and is located alongside this script. You can
choose any other file by setting the BROOK_INI_PATH environment variable.
If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances
in projects to which the requesting user belongs. Otherwise, only instances from the given
project are included, provided the requesting user belongs to it.
The following variables are established for every host. They can be retrieved from the hostvars
dictionary.
- brook_pid: str
- brook_name: str
- brook_description: str
- brook_project: str
- brook_template: str
- brook_region: str
- brook_zone: str
- brook_status: str
- brook_tags: list(str)
- brook_internal_ips: list(str)
- brook_external_ips: list(str)
- brook_created_at
- brook_updated_at
- ansible_ssh_host
Instances are grouped by the following categories:
- tag:
A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist
instances with tags 'foo' and/or 'bar'.
- project:
A group is created for each project. E.g. group 'project_test' is created if a project named
'test' exists.
- status:
A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING'
are created if there are instances in the running and pending states.
Examples:
Execute uname on all instances in project 'test'
$ ansible -i brook.py project_test -m shell -a "/bin/uname -a"
Install nginx on all debian web servers tagged with 'www'
$ ansible -i brook.py tag_www -m apt -a "name=nginx state=present"
Run site.yml playbook on web servers
$ ansible-playbook -i brook.py site.yml -l tag_www
Support:
This script is tested on Python 2.7 and 3.4. It may work on other versions though.
Author: Francisco Ros <fjros@doalitic.com>
Version: 0.2
"""
import sys
import os
from ansible.module_utils.six.moves.configparser import SafeConfigParser as ConfigParser
import json
try:
import libbrook
except Exception:
sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook')
class BrookInventory:
_API_ENDPOINT = 'https://api.brook.io'
def __init__(self):
self._configure_from_file()
self.client = self.get_api_client()
self.inventory = self.get_inventory()
def _configure_from_file(self):
"""Initialize from .ini file.
Configuration file is assumed to be named 'brook.ini' and to be located in the same
directory as this file, unless the environment variable BROOK_INI_PATH says otherwise.
"""
brook_ini_default_path = \
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini')
brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path)
config = ConfigParser(defaults={
'api_token': '',
'project_id': ''
})
config.read(brook_ini_path)
self.api_token = config.get('brook', 'api_token')
self.project_id = config.get('brook', 'project_id')
if not self.api_token:
sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic '
'inventory.')
def get_api_client(self):
"""Authenticate user via the provided credentials and return the corresponding API client.
"""
# Get JWT token from API token
#
unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT)
auth_api = libbrook.AuthApi(unauthenticated_client)
api_token = libbrook.AuthTokenRequest()
api_token.token = self.api_token
jwt = auth_api.auth_token(token=api_token)
# Create authenticated API client
#
return libbrook.ApiClient(host=self._API_ENDPOINT,
header_name='Authorization',
header_value='Bearer %s' % jwt.token)
def get_inventory(self):
"""Generate Ansible inventory.
"""
groups = dict()
meta = dict()
meta['hostvars'] = dict()
instances_api = libbrook.InstancesApi(self.client)
projects_api = libbrook.ProjectsApi(self.client)
templates_api = libbrook.TemplatesApi(self.client)
# If no project is given, get all projects the requesting user has access to
#
if not self.project_id:
projects = [project.id for project in projects_api.index_projects()]
else:
projects = [self.project_id]
# Build inventory from instances in all projects
#
for project_id in projects:
project = projects_api.show_project(project_id=project_id)
for instance in instances_api.index_instances(project_id=project_id):
# Get template used for this instance if known
template = templates_api.show_template(template_id=instance.template) if instance.template else None
# Update hostvars
try:
meta['hostvars'][instance.name] = \
self.hostvars(project, instance, template, instances_api)
except libbrook.rest.ApiException:
continue
# Group by project
project_group = 'project_%s' % project.name
if project_group in groups:
groups[project_group].append(instance.name)
else:
groups[project_group] = [instance.name]
# Group by status
status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status']
if status_group in groups:
groups[status_group].append(instance.name)
else:
groups[status_group] = [instance.name]
# Group by tags
tags = meta['hostvars'][instance.name]['brook_tags']
for tag in tags:
tag_group = 'tag_%s' % tag
if tag_group in groups:
groups[tag_group].append(instance.name)
else:
groups[tag_group] = [instance.name]
groups['_meta'] = meta
return groups
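# The returned structure follows the standard dynamic-inventory layout; with
# hypothetical names it would look like:
# {'project_test': ['web01'], 'status_RUNNING': ['web01'], 'tag_www': ['web01'],
#  '_meta': {'hostvars': {'web01': {...}}}}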
def hostvars(self, project, instance, template, api):
"""Return the hostvars dictionary for the given instance.
Raise libbrook.rest.ApiException if it cannot retrieve all required information from the
Brook.io API.
"""
hostvars = instance.to_dict()
hostvars['brook_pid'] = hostvars.pop('pid')
hostvars['brook_name'] = hostvars.pop('name')
hostvars['brook_description'] = hostvars.pop('description')
hostvars['brook_project'] = hostvars.pop('project')
hostvars['brook_template'] = hostvars.pop('template')
hostvars['brook_region'] = hostvars.pop('region')
hostvars['brook_zone'] = hostvars.pop('zone')
hostvars['brook_created_at'] = hostvars.pop('created_at')
hostvars['brook_updated_at'] = hostvars.pop('updated_at')
del hostvars['id']
del hostvars['key']
del hostvars['provider']
del hostvars['image']
# Substitute identifiers for names
#
hostvars['brook_project'] = project.name
hostvars['brook_template'] = template.name if template else None
# Retrieve instance state
#
status = api.status_instance(project_id=project.id, instance_id=instance.id)
hostvars.update({'brook_status': status.state})
# Retrieve instance tags
#
tags = api.instance_tags(project_id=project.id, instance_id=instance.id)
hostvars.update({'brook_tags': tags})
# Retrieve instance addresses
#
addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id)
internal_ips = [address.address for address in addresses if address.scope == 'internal']
external_ips = [address.address for address in addresses
if address.address and address.scope == 'external']
hostvars.update({'brook_internal_ips': internal_ips})
hostvars.update({'brook_external_ips': external_ips})
try:
hostvars.update({'ansible_ssh_host': external_ips[0]})
except IndexError:
raise libbrook.rest.ApiException(status='502', reason='Instance without public IP')
return hostvars
# Run the script
#
brook = BrookInventory()
print(json.dumps(brook.inventory))

@ -1,40 +0,0 @@
[cloudforms]
# The version of CloudForms; currently not used, but this script was tested with it
version = 4.1
# This should be the hostname of the CloudForms server
url = https://cfme.example.com
# This will more than likely need to be a local CloudForms username
username = <set your username here>
# The password for said username
password = <set your password here>
# True = verify SSL certificate / False = trust anything
ssl_verify = True
# limit the number of vms returned per request
limit = 100
# purge the CloudForms actions from hosts
purge_actions = True
# Clean up group names (from tags and other groupings so Ansible doesn't complain)
clean_group_keys = True
# Explode tags into nested groups / subgroups
nest_tags = False
# If set, ensure host name are suffixed with this value
# Note: This suffix *must* include the leading '.' as it is appended to the hostname as is
# suffix = .example.org
# If true, will try to use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list
prefer_ipv4 = False
[cache]
# Maximum time to trust the cache in seconds
max_age = 600

@ -1,483 +0,0 @@
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with it. If not, see <http://www.gnu.org/licenses/>.
#
# This is loosely based on the foreman inventory script
# -- Josh Preston <jpreston@redhat.com>
#
from __future__ import print_function
import argparse
from ansible.module_utils.six.moves import configparser as ConfigParser
import os
import re
from time import time
import requests
from requests.auth import HTTPBasicAuth
import warnings
from ansible.errors import AnsibleError
import json
class CloudFormsInventory(object):
def __init__(self):
"""
Main execution path
"""
self.inventory = dict() # A list of groups and the hosts in that group
self.hosts = dict() # Details about hosts in the inventory
# Parse CLI arguments
self.parse_cli_args()
# Read settings
self.read_settings()
# Cache
if self.args.refresh_cache or not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_hosts_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
if self.args.debug:
print("Fetching host [%s]" % self.args.host)
data_to_print += self.get_host_info(self.args.host)
else:
self.inventory['_meta'] = {'hostvars': {}}
for hostname in self.hosts:
self.inventory['_meta']['hostvars'][hostname] = {
'cloudforms': self.hosts[hostname],
}
# include the ansible_ssh_host in the top level
if 'ansible_ssh_host' in self.hosts[hostname]:
self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host']
data_to_print += self.json_format_dict(self.inventory, self.args.pretty)
print(data_to_print)
def is_cache_valid(self):
"""
Determines whether the cache files have expired or are still valid
"""
if self.args.debug:
print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age))
if os.path.isfile(self.cache_path_hosts):
mod_time = os.path.getmtime(self.cache_path_hosts)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
if self.args.debug:
print("Cache is still valid!")
return True
if self.args.debug:
print("Cache is stale or does not exist.")
return False
def read_settings(self):
"""
Reads the settings from the cloudforms.ini file
"""
config = ConfigParser.SafeConfigParser()
config_paths = [
os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini',
"/etc/ansible/cloudforms.ini",
]
env_value = os.environ.get('CLOUDFORMS_INI_PATH')
if env_value is not None:
config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
if self.args.debug:
for config_path in config_paths:
print("Reading from configuration file [%s]" % config_path)
config.read(config_paths)
# CloudForms API related
if config.has_option('cloudforms', 'url'):
self.cloudforms_url = config.get('cloudforms', 'url')
else:
self.cloudforms_url = None
if not self.cloudforms_url:
warnings.warn("No url specified, expected something like 'https://cfme.example.com'")
if config.has_option('cloudforms', 'username'):
self.cloudforms_username = config.get('cloudforms', 'username')
else:
self.cloudforms_username = None
if not self.cloudforms_username:
warnings.warn("No username specified, you need to specify a CloudForms username.")
if config.has_option('cloudforms', 'password'):
self.cloudforms_pw = config.get('cloudforms', 'password', raw=True)
else:
self.cloudforms_pw = None
if not self.cloudforms_pw:
warnings.warn("No password specified, you need to specify a password for the CloudForms user.")
if config.has_option('cloudforms', 'ssl_verify'):
self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify')
else:
self.cloudforms_ssl_verify = True
if config.has_option('cloudforms', 'version'):
self.cloudforms_version = config.get('cloudforms', 'version')
else:
self.cloudforms_version = None
if config.has_option('cloudforms', 'limit'):
self.cloudforms_limit = config.getint('cloudforms', 'limit')
else:
self.cloudforms_limit = 100
if config.has_option('cloudforms', 'purge_actions'):
self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions')
else:
self.cloudforms_purge_actions = True
if config.has_option('cloudforms', 'clean_group_keys'):
self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys')
else:
self.cloudforms_clean_group_keys = True
if config.has_option('cloudforms', 'nest_tags'):
self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags')
else:
self.cloudforms_nest_tags = False
if config.has_option('cloudforms', 'suffix'):
self.cloudforms_suffix = config.get('cloudforms', 'suffix')
if self.cloudforms_suffix[0] != '.':
raise AnsibleError('Leading fullstop is required for Cloudforms suffix')
else:
self.cloudforms_suffix = None
if config.has_option('cloudforms', 'prefer_ipv4'):
self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4')
else:
self.cloudforms_prefer_ipv4 = False
# Ansible related
try:
group_patterns = config.get('ansible', 'group_patterns')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
group_patterns = "[]"
self.group_patterns = eval(group_patterns)
# Cache related
try:
cache_path = os.path.expanduser(config.get('cache', 'path'))
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
cache_path = '.'
(script, ext) = os.path.splitext(os.path.basename(__file__))
self.cache_path_hosts = cache_path + "/%s.hosts" % script
self.cache_path_inventory = cache_path + "/%s.inventory" % script
self.cache_max_age = config.getint('cache', 'max_age')
if self.args.debug:
print("CloudForms settings:")
print("cloudforms_url = %s" % self.cloudforms_url)
print("cloudforms_username = %s" % self.cloudforms_username)
print("cloudforms_pw = %s" % self.cloudforms_pw)
print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify)
print("cloudforms_version = %s" % self.cloudforms_version)
print("cloudforms_limit = %s" % self.cloudforms_limit)
print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions)
print("Cache settings:")
print("cache_max_age = %s" % self.cache_max_age)
print("cache_path_hosts = %s" % self.cache_path_hosts)
print("cache_path_inventory = %s" % self.cache_path_inventory)
def parse_cli_args(self):
"""
Command line argument processing
"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)')
parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)')
self.args = parser.parse_args()
def _get_json(self, url):
"""
Make a request and return the JSON
"""
results = []
ret = requests.get(url,
auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw),
verify=self.cloudforms_ssl_verify)
ret.raise_for_status()
try:
results = json.loads(ret.text)
except ValueError:
warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason))
results = {}
if self.args.debug:
print("=======================================================================")
print("=======================================================================")
print("=======================================================================")
print(ret.text)
print("=======================================================================")
print("=======================================================================")
print("=======================================================================")
return results
def _get_hosts(self):
"""
Get all hosts by paging through the results
"""
limit = self.cloudforms_limit
page = 0
last_page = False
results = []
while not last_page:
offset = page * limit
ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts,&attributes=ipaddresses" % (self.cloudforms_url, offset, limit))
results += ret['resources']
if ret['subcount'] < limit:
last_page = True
page += 1
return results
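# Example: with the default limit of 100, the loop above requests offsets
# 0, 100, 200, ... and stops after the first page whose 'subcount' (the number
# of resources actually returned) falls below the limit.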
def update_cache(self):
"""
Make calls to cloudforms and save the output in a cache
"""
self.groups = dict()
self.hosts = dict()
if self.args.debug:
print("Updating cache...")
for host in self._get_hosts():
if self.cloudforms_suffix is not None and not host['name'].endswith(self.cloudforms_suffix):
host['name'] = host['name'] + self.cloudforms_suffix
# Ignore VMs that are not powered on
if host['power_state'] != 'on':
if self.args.debug:
print("Skipping %s because power_state = %s" % (host['name'], host['power_state']))
continue
# purge actions
if self.cloudforms_purge_actions and 'actions' in host:
del host['actions']
# Create ansible groups for tags
if 'tags' in host:
# Create top-level group
if 'tags' not in self.inventory:
self.inventory['tags'] = dict(children=[], vars={}, hosts=[])
if not self.cloudforms_nest_tags:
# don't expand tags, just use them in a safe way
for group in host['tags']:
# Add sub-group, as a child of top-level
safe_key = self.to_safe(group['name'])
if safe_key:
if self.args.debug:
print("Adding sub-group '%s' to parent 'tags'" % safe_key)
if safe_key not in self.inventory['tags']['children']:
self.push(self.inventory['tags'], 'children', safe_key)
self.push(self.inventory, safe_key, host['name'])
if self.args.debug:
print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key))
else:
# expand the tags into nested groups / sub-groups
# Create nested groups for tags
safe_parent_tag_name = 'tags'
for tag in host['tags']:
tag_hierarchy = tag['name'][1:].split('/')
if self.args.debug:
print("Working on list %s" % tag_hierarchy)
for tag_name in tag_hierarchy:
if self.args.debug:
print("Working on tag_name = %s" % tag_name)
safe_tag_name = self.to_safe(tag_name)
if self.args.debug:
print("Using sanitized name %s" % safe_tag_name)
# Create sub-group
if safe_tag_name not in self.inventory:
self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[])
# Add sub-group, as a child of top-level
if safe_parent_tag_name:
if self.args.debug:
print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name))
if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']:
self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name)
# Make sure the next one uses this one as its parent
safe_parent_tag_name = safe_tag_name
# Add the host to the last tag
self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name'])
# Set ansible_ssh_host to the first available ip address
if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list):
# If no preference for IPv4, just use the first entry
if not self.cloudforms_prefer_ipv4:
host['ansible_ssh_host'] = host['ipaddresses'][0]
else:
# Before we search for an IPv4 address, set using the first entry in case we don't find any
host['ansible_ssh_host'] = host['ipaddresses'][0]
for currenthost in host['ipaddresses']:
if '.' in currenthost:
host['ansible_ssh_host'] = currenthost
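# Note: the scan above treats any address containing a '.' as IPv4, so when
# prefer_ipv4 is set the last IPv4 address in the list wins.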
# Create additional groups
for key in ('location', 'type', 'vendor'):
safe_key = self.to_safe(host[key])
# Create top-level group
if key not in self.inventory:
self.inventory[key] = dict(children=[], vars={}, hosts=[])
# Create sub-group
if safe_key not in self.inventory:
self.inventory[safe_key] = dict(children=[], vars={}, hosts=[])
# Add sub-group, as a child of top-level
if safe_key not in self.inventory[key]['children']:
self.push(self.inventory[key], 'children', safe_key)
if key in host:
# Add host to sub-group
self.push(self.inventory[safe_key], 'hosts', host['name'])
self.hosts[host['name']] = host
self.push(self.inventory, 'all', host['name'])
if self.args.debug:
print("Saving cached data")
self.write_to_cache(self.hosts, self.cache_path_hosts)
self.write_to_cache(self.inventory, self.cache_path_inventory)
def get_host_info(self, host):
"""
Get variables about a specific host
"""
if not self.hosts or len(self.hosts) == 0:
# Need to load hosts from cache
self.load_hosts_from_cache()
if host not in self.hosts:
if self.args.debug:
print("[%s] not found in cache." % host)
# try updating the cache
self.update_cache()
if host not in self.hosts:
if self.args.debug:
print("[%s] does not exist after cache update." % host)
# host might not exist anymore
return self.json_format_dict({}, self.args.pretty)
return self.json_format_dict(self.hosts[host], self.args.pretty)
def push(self, d, k, v):
"""
Safely puts a new entry onto an array.
"""
if k in d:
d[k].append(v)
else:
d[k] = [v]
def load_inventory_from_cache(self):
"""
Reads the inventory from the cache file and sets self.inventory
"""
cache = open(self.cache_path_inventory, 'r')
json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
def load_hosts_from_cache(self):
"""
Reads the hosts from the cache file and sets self.hosts
"""
cache = open(self.cache_path_hosts, 'r')
json_cache = cache.read()
self.hosts = json.loads(json_cache)
def write_to_cache(self, data, filename):
"""
Writes data in JSON format to a file
"""
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
"""
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
"""
if self.cloudforms_clean_group_keys:
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))
else:
return word
def json_format_dict(self, data, pretty=False):
"""
Converts a dict to a JSON object and dumps it as a formatted string
"""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
CloudFormsInventory()

@ -1,5 +0,0 @@
[cloudstack]
#endpoint = https://api.exoscale.ch/compute
endpoint = https://cloud.example.com/client/api
key = cloudstack api key
secret = cloudstack api secret

@ -1,277 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Ansible CloudStack external inventory script.
=============================================
Generates Ansible inventory from CloudStack. Configuration is read from
'cloudstack.ini'. If you need to pass the project, write a simple wrapper
script, e.g. project_cloudstack.sh:
#!/bin/bash
cloudstack.py --project <your_project> $@
When run against a specific host, this script returns the following attributes
based on the data obtained from CloudStack API:
"web01": {
"cpu_number": 2,
"nic": [
{
"ip": "10.102.76.98",
"mac": "02:00:50:99:00:01",
"type": "Isolated",
"netmask": "255.255.255.0",
"gateway": "10.102.76.1"
},
{
"ip": "10.102.138.63",
"mac": "06:b7:5a:00:14:84",
"type": "Shared",
"netmask": "255.255.255.0",
"gateway": "10.102.138.1"
}
],
"default_ip": "10.102.76.98",
"zone": "ZUERICH",
"created": "2014-07-02T07:53:50+0200",
"hypervisor": "VMware",
"memory": 2048,
"state": "Running",
"tags": [],
"cpu_speed": 1800,
"affinity_group": [],
"service_offering": "Small",
"cpu_used": "62%"
}
usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] [--domain DOMAIN]
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import argparse
import json
try:
from cs import CloudStack, CloudStackException, read_config
except ImportError:
print("Error: CloudStack library must be installed: pip install cs.",
file=sys.stderr)
sys.exit(1)
class CloudStackInventory(object):
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
parser.add_argument('--tag', help="Filter machines by a tag. Should be in the form key=value.")
parser.add_argument('--project')
parser.add_argument('--domain')
options = parser.parse_args()
try:
self.cs = CloudStack(**read_config())
except CloudStackException:
print("Error: Could not connect to CloudStack API", file=sys.stderr)
domain_id = None
if options.domain:
domain_id = self.get_domain_id(options.domain)
project_id = None
if options.project:
project_id = self.get_project_id(options.project, domain_id)
if options.host:
data = self.get_host(options.host, project_id, domain_id)
print(json.dumps(data, indent=2))
elif options.list:
tags = dict()
if options.tag:
tags['tags[0].key'], tags['tags[0].value'] = options.tag.split('=')
data = self.get_list(project_id, domain_id, **tags)
print(json.dumps(data, indent=2))
else:
print("usage: --list [--tag <tag>] | --host <hostname> [--project <project>] [--domain <domain_path>]",
file=sys.stderr)
sys.exit(1)
def get_domain_id(self, domain):
domains = self.cs.listDomains(listall=True)
if domains:
for d in domains['domain']:
if d['path'].lower() == domain.lower():
return d['id']
print("Error: Domain %s not found." % domain, file=sys.stderr)
sys.exit(1)
def get_project_id(self, project, domain_id=None):
projects = self.cs.listProjects(domainid=domain_id)
if projects:
for p in projects['project']:
if p['name'] == project or p['id'] == project:
return p['id']
print("Error: Project %s not found." % project, file=sys.stderr)
sys.exit(1)
def get_host(self, name, project_id=None, domain_id=None, **kwargs):
hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs)
data = {}
if not hosts:
return data
for host in hosts:
host_name = host['displayname']
if name == host_name:
data['zone'] = host['zonename']
if 'group' in host:
data['group'] = host['group']
data['state'] = host['state']
data['service_offering'] = host['serviceofferingname']
data['affinity_group'] = host['affinitygroup']
data['security_group'] = host['securitygroup']
data['cpu_number'] = host['cpunumber']
if 'cpuspeed' in host:
data['cpu_speed'] = host['cpuspeed']
if 'cpuused' in host:
data['cpu_used'] = host['cpuused']
data['memory'] = host['memory']
data['tags'] = host['tags']
if 'hypervisor' in host:
data['hypervisor'] = host['hypervisor']
data['created'] = host['created']
data['nic'] = []
for nic in host['nic']:
nicdata = {
'ip': nic['ipaddress'],
'mac': nic['macaddress'],
'netmask': nic['netmask'],
'gateway': nic['gateway'],
'type': nic['type'],
}
if 'ip6address' in nic:
nicdata['ip6'] = nic['ip6address']
if 'gateway' in nic:
nicdata['gateway'] = nic['gateway']
if 'netmask' in nic:
nicdata['netmask'] = nic['netmask']
data['nic'].append(nicdata)
if nic['isdefault']:
data['default_ip'] = nic['ipaddress']
if 'ip6address' in nic:
data['default_ip6'] = nic['ip6address']
break
return data
def get_list(self, project_id=None, domain_id=None, **kwargs):
data = {
'all': {
'hosts': [],
},
'_meta': {
'hostvars': {},
},
}
groups = self.cs.listInstanceGroups(projectid=project_id, domainid=domain_id)
if groups:
for group in groups['instancegroup']:
group_name = group['name']
if group_name and group_name not in data:
data[group_name] = {
'hosts': []
}
hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs)
if not hosts:
return data
for host in hosts:
host_name = host['displayname']
data['all']['hosts'].append(host_name)
data['_meta']['hostvars'][host_name] = {}
# Make a group per zone
data['_meta']['hostvars'][host_name]['zone'] = host['zonename']
group_name = host['zonename']
if group_name not in data:
data[group_name] = {
'hosts': []
}
data[group_name]['hosts'].append(host_name)
if 'group' in host:
data['_meta']['hostvars'][host_name]['group'] = host['group']
data['_meta']['hostvars'][host_name]['state'] = host['state']
data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname']
data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup']
data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup']
data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber']
if 'cpuspeed' in host:
data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed']
if 'cpuused' in host:
data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused']
data['_meta']['hostvars'][host_name]['created'] = host['created']
data['_meta']['hostvars'][host_name]['memory'] = host['memory']
data['_meta']['hostvars'][host_name]['tags'] = host['tags']
if 'hypervisor' in host:
data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor']
data['_meta']['hostvars'][host_name]['created'] = host['created']
data['_meta']['hostvars'][host_name]['nic'] = []
for nic in host['nic']:
nicdata = {
'ip': nic['ipaddress'],
'mac': nic['macaddress'],
'netmask': nic['netmask'],
'gateway': nic['gateway'],
'type': nic['type'],
}
if 'ip6address' in nic:
nicdata['ip6'] = nic['ip6address']
if 'gateway' in nic:
nicdata['gateway'] = nic['gateway']
if 'netmask' in nic:
nicdata['netmask'] = nic['netmask']
data['_meta']['hostvars'][host_name]['nic'].append(nicdata)
if nic['isdefault']:
data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress']
if 'ip6address' in nic:
data['_meta']['hostvars'][host_name]['default_ip6'] = nic['ip6address']
group_name = ''
if 'group' in host:
group_name = host['group']
if group_name and group_name in data:
data[group_name]['hosts'].append(host_name)
return data
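# With a hypothetical host, the returned inventory would look like:
# {'all': {'hosts': ['web01']}, 'ZUERICH': {'hosts': ['web01']},
#  '_meta': {'hostvars': {'web01': {'zone': 'ZUERICH', ...}}}}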
if __name__ == '__main__':
CloudStackInventory()

@ -1,24 +0,0 @@
# Ansible Cobbler external inventory script settings
#
[cobbler]
host = http://PATH_TO_COBBLER_SERVER/cobbler_api
# If API needs authentication add 'username' and 'password' options here.
#username = foo
#password = bar
# API calls to Cobbler can be slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-cobbler.cache
# - ansible-cobbler.index
cache_path = /tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 900

@ -1,312 +0,0 @@
#!/usr/bin/env python
"""
Cobbler external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is an example of sourcing that data from Cobbler
(https://cobbler.github.io). With Cobbler, each --mgmt-class
will correspond to a group in Ansible, and --ks-meta variables will be
passed down for use in templates or even in argument lines.
NOTE: The cobbler system names will not be used. Make sure a
cobbler --dns-name is set for each cobbler system. If a system
appears with two DNS names we do not add it twice because we don't want
ansible talking to it twice. The first one found will be used. If no
--dns-name is set the system will NOT be visible to ansible. We do
not add cobbler system names because there is no requirement in cobbler
that those correspond to addresses.
Tested with Cobbler 2.0.11.
Changelog:
- 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in
higher performance at ansible startup. Groups are determined by owner rather than
default mgmt_classes. DNS name determined from hostname. cobbler values are written
to a 'cobbler' fact namespace
- 2013-09-01 pgehres: Refactored implementation to make use of caching and to
limit the number of connections to external cobbler server for performance.
Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0
"""
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
######################################################################
import argparse
import os
import re
from time import time
# six.moves keeps this import working on both Python 2 and 3
from ansible.module_utils.six.moves import xmlrpc_client as xmlrpclib
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
# NOTE -- this file assumes Ansible is being accessed FROM the cobbler
# server, so it does not attempt to login with a username and password.
# this will be addressed in a future version of this script.
orderby_keyname = 'owners' # alternatively 'mgmt_classes'
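# Example: with 'owners', a system whose owners are ['dba', 'web'] lands in the
# Ansible groups 'dba' and 'web'; switching to 'mgmt_classes' pivots the
# grouping on Cobbler management classes instead.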
class CobblerInventory(object):
def __init__(self):
""" Main execution path """
self.conn = None
self.inventory = dict() # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
self.ignore_settings = False # used to only look at env vars for settings.
# Read env vars, read settings, and parse CLI arguments
self.parse_env_vars()
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.update_cache()
elif not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_cache_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
data_to_print += self.get_host_info()
else:
self.inventory['_meta'] = {'hostvars': {}}
for hostname in self.cache:
self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]}
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def _connect(self):
if not self.conn:
self.conn = xmlrpclib.Server(self.cobbler_host, allow_none=True)
self.token = None
if self.cobbler_username is not None:
self.token = self.conn.login(self.cobbler_username, self.cobbler_password)
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
return True
return False
def read_settings(self):
""" Reads the settings from the cobbler.ini file """
if self.ignore_settings:
return
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini')
self.cobbler_host = config.get('cobbler', 'host')
self.cobbler_username = None
self.cobbler_password = None
if config.has_option('cobbler', 'username'):
self.cobbler_username = config.get('cobbler', 'username')
if config.has_option('cobbler', 'password'):
self.cobbler_password = config.get('cobbler', 'password')
# Cache related
cache_path = config.get('cobbler', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
self.cache_max_age = config.getint('cobbler', 'cache_max_age')
def parse_env_vars(self):
""" Reads the settings from the environment """
# Env. Vars:
# COBBLER_host
# COBBLER_username
# COBBLER_password
# COBBLER_cache_path
# COBBLER_cache_max_age
# COBBLER_ignore_settings
self.cobbler_host = os.getenv('COBBLER_host', None)
self.cobbler_username = os.getenv('COBBLER_username', None)
self.cobbler_password = os.getenv('COBBLER_password', None)
# Cache related
cache_path = os.getenv('COBBLER_cache_path', None)
if cache_path is not None:
self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
self.cache_max_age = int(os.getenv('COBBLER_cache_max_age', "30"))
# ignore_settings is used to ignore the settings file, for use in Ansible
# Tower (or AWX) inventory scripts, without throwing python exceptions.
if os.getenv('COBBLER_ignore_settings', False) == "True":
self.ignore_settings = True
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)')
self.args = parser.parse_args()
def update_cache(self):
""" Make calls to cobbler and save the output in a cache """
self._connect()
self.groups = dict()
self.hosts = dict()
if self.token is not None:
data = self.conn.get_systems(self.token)
else:
data = self.conn.get_systems()
for host in data:
# Get the FQDN for the host and add it to the right groups
dns_name = host['hostname']  # often '' for non-static hosts
ksmeta = None
interfaces = host['interfaces']
# hostname is often empty for non-static IP hosts
if dns_name == '':
for (iname, ivalue) in iteritems(interfaces):
if ivalue['management'] or not ivalue['static']:
this_dns_name = ivalue.get('dns_name', None)
dns_name = this_dns_name if this_dns_name else ''
if dns_name == '' or dns_name is None:
continue
status = host['status']
profile = host['profile']
classes = host[orderby_keyname]
if status not in self.inventory:
self.inventory[status] = []
self.inventory[status].append(dns_name)
if profile not in self.inventory:
self.inventory[profile] = []
self.inventory[profile].append(dns_name)
for cls in classes:
if cls not in self.inventory:
self.inventory[cls] = []
self.inventory[cls].append(dns_name)
# Since we already have all of the data for the host, update the host details as well
# The old way was ksmeta only -- provide backwards compatibility
self.cache[dns_name] = host
if "ks_meta" in host:
for key, value in iteritems(host["ks_meta"]):
self.cache[dns_name][key] = value
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
def get_host_info(self):
""" Get variables about a specific host """
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return self.json_format_dict({}, True)
return self.json_format_dict(self.cache[self.args.host], True)
def push(self, my_dict, key, element):
""" Pushed an element onto an array that may not have been defined in the dict """
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def load_inventory_from_cache(self):
""" Reads the index from the cache file sets self.index """
cache = open(self.cache_path_inventory, 'r')
json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
def load_cache_from_cache(self):
""" Reads the cache from the cache file sets self.cache """
cache = open(self.cache_path_cache, 'r')
json_cache = cache.read()
self.cache = json.loads(json_cache)
def write_to_cache(self, data, filename):
""" Writes data in JSON format to a file """
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
""" Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
CobblerInventory()

@ -1,57 +0,0 @@
# Ansible Collins external inventory script settings
#
[collins]
# You should not have a trailing slash or collins
# will not properly match the URI
host = http://localhost:9000
username = blake
password = admin:first
# Specifies a timeout for all HTTP requests to Collins.
timeout_secs = 120
# Specifies a maximum number of retries per Collins request.
max_retries = 5
# Specifies the number of results to return per paginated query as specified in
# the Pagination section of the Collins API docs:
# http://tumblr.github.io/collins/api.html
results_per_query = 100
# Specifies the Collins asset type which will be queried for; most typically
# you'll want to leave this at the default of SERVER_NODE.
asset_type = SERVER_NODE
# Collins assets can optionally be assigned hostnames; this option will prefer
# an asset's hostname over an IP address as the primary identifier in the
# Ansible inventory. Typically, this value should be set to true if assets
# are assigned hostnames.
prefer_hostnames = true
# Within Collins, assets can be granted multiple IP addresses; this configuration
# value specifies the index within the 'ADDRESSES' array as returned by the
# following API endpoint:
# http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section
ip_address_index = 0
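# E.g. if an asset's ADDRESSES array were [{"ADDRESS": "10.0.0.5"}, {"ADDRESS": "10.0.1.5"}],
# an index of 0 would select 10.0.0.5 (illustrative values only).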
# Sets whether Collins instances in multiple datacenters will be queried.
query_remote_dcs = false
# API calls to Collins can involve large, substantial queries. For this reason,
# we cache the results of an API call. Set this to the path you want cache files
# to be written to. Two files will be written to this directory:
# - ansible-collins.cache
# - ansible-collins.index
cache_path = /tmp
# If errors occur while querying inventory, logging messages will be written
# to a logfile in the specified directory:
# - ansible-collins.log
log_path = /tmp
# The number of seconds that a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 600

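The script below consumes these settings with ConfigParser, letting a few environment variables take precedence over the INI values. A minimal sketch of that precedence, assuming collins.ini sits next to the script (option names follow the file above):

```python
import os

try:
    import configparser
except ImportError:
    import ConfigParser as configparser  # Python 2

config = configparser.ConfigParser()
config.read('collins.ini')

host = config.get('collins', 'host')
# Environment variables override the INI values when set
username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
timeout_secs = config.getint('collins', 'timeout_secs')
prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')
```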
@ -1,439 +0,0 @@
#!/usr/bin/env python
"""
Collins external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
Collins is a hardware asset management system originally developed by
Tumblr for tracking new hardware as it built out its own datacenters. It
exposes a rich API for manipulating and querying one's hardware inventory,
which makes it an ideal 'single point of truth' for driving systems
automation like Ansible. Extensive documentation on Collins, including a quickstart,
API docs, and a full reference manual, can be found here:
http://tumblr.github.io/collins
This script adds support to Ansible for obtaining a dynamic inventory of
assets in your infrastructure, grouping them in Ansible by their useful attributes,
and binding all facts provided by Collins to each host so that they can be used to
drive automation. Some parts of this script were cribbed shamelessly from mdehaan's
Cobbler inventory script.
To use it, copy it to your repo and pass -i <collins script> to the ansible or
ansible-playbook command; if you'd like to use it by default, simply copy collins.ini
to /etc/ansible and this script to /etc/ansible/hosts.
Alongside the options set in collins.ini, there are several environment variables
that will be used instead of the configured values if they are set:
- COLLINS_USERNAME - specifies a username to use for Collins authentication
- COLLINS_PASSWORD - specifies a password to use for Collins authentication
- COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying;
this can be used to run Ansible automation against different asset classes than
server nodes, such as network switches and PDUs
- COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to
<location of collins.py>/collins.ini
If errors are encountered during operation, this script will return an exit code of
255; otherwise, it will return an exit code of 0.
Collins attributes are accessible as variables in Ansible via COLLINS['attribute_name'].
Tested against Ansible 1.8.2 and Collins 1.3.0.
"""
# (c) 2014, Steve Salevan <steve.salevan@gmail.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import argparse
import logging
import os
import re
import sys
from time import time
import traceback
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import open_url
class CollinsDefaults(object):
ASSETS_API_ENDPOINT = '%s/api/assets'
SPECIAL_ATTRIBUTES = set([
'CREATED',
'DELETED',
'UPDATED',
'STATE',
])
LOG_FORMAT = '%(asctime)-15s %(message)s'
class Error(Exception):
pass
class MaxRetriesError(Error):
pass
class CollinsInventory(object):
def __init__(self):
""" Constructs CollinsInventory object and reads all configuration. """
        self.inventory = dict()  # A dict mapping group names to the hosts in each group
self.cache = dict() # Details about hosts in the inventory
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
filename=self.log_location)
self.log = logging.getLogger('CollinsInventory')
def _asset_get_attribute(self, asset, attrib):
""" Returns a user-defined attribute from an asset if it exists; otherwise,
returns None. """
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
if attrib in asset['ATTRIBS'][attrib_block]:
return asset['ATTRIBS'][attrib_block][attrib]
return None
def _asset_has_attribute(self, asset, attrib):
""" Returns whether a user-defined attribute is present on an asset. """
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
if attrib in asset['ATTRIBS'][attrib_block]:
return True
return False
def run(self):
""" Main execution path """
# Updates cache if cache is not present or has expired.
successful = True
if self.args.refresh_cache:
successful = self.update_cache()
elif not self.is_cache_valid():
successful = self.update_cache()
else:
successful = self.load_inventory_from_cache()
successful &= self.load_cache_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
else: # default action with no options
data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
print(data_to_print)
return successful
def find_assets(self, attributes=None, operation='AND'):
""" Obtains Collins assets matching the provided attributes. """
attributes = {} if attributes is None else attributes
# Formats asset search query to locate assets matching attributes, using
# the CQL search feature as described here:
# http://tumblr.github.io/collins/recipes.html
attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)]
query_parameters = {
'details': ['True'],
'operation': [operation],
'query': attributes_query,
'remoteLookup': [str(self.query_remote_dcs)],
'size': [self.results_per_query],
'type': [self.collins_asset_type],
}
assets = []
cur_page = 0
num_retries = 0
# Locates all assets matching the provided query, exhausting pagination.
while True:
if num_retries == self.collins_max_retries:
raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries)
query_parameters['page'] = cur_page
query_url = "%s?%s" % (
(CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
urlencode(query_parameters, doseq=True)
)
try:
response = open_url(query_url,
timeout=self.collins_timeout_secs,
url_username=self.collins_username,
url_password=self.collins_password,
force_basic_auth=True)
json_response = json.loads(response.read())
# Adds any assets found to the array of assets.
assets += json_response['data']['Data']
# If we've retrieved all of our assets, breaks out of the loop.
if len(json_response['data']['Data']) == 0:
break
cur_page += 1
num_retries = 0
except Exception:
self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
num_retries += 1
return assets
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
return True
return False
def read_settings(self):
""" Reads the settings from the collins.ini file """
config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
config = ConfigParser.SafeConfigParser()
        config.read(config_loc)
self.collins_host = config.get('collins', 'host')
self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
self.collins_max_retries = config.getint('collins', 'max_retries')
self.results_per_query = config.getint('collins', 'results_per_query')
self.ip_address_index = config.getint('collins', 'ip_address_index')
self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')
cache_path = config.get('collins', 'cache_path')
self.cache_path_cache = cache_path + \
'/ansible-collins-%s.cache' % self.collins_asset_type
self.cache_path_inventory = cache_path + \
'/ansible-collins-%s.index' % self.collins_asset_type
self.cache_max_age = config.getint('collins', 'cache_max_age')
log_path = config.get('collins', 'log_path')
self.log_location = log_path + '/ansible-collins.log'
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(
description='Produces an Ansible Inventory file based on Collins')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Collins '
'(default: False - use cache files)')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
self.args = parser.parse_args()
def update_cache(self):
""" Make calls to Collins and saves the output in a cache """
self.cache = dict()
self.inventory = dict()
# Locates all server assets from Collins.
try:
server_assets = self.find_assets()
except Exception:
self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
return False
for asset in server_assets:
# Determines the index to retrieve the asset's IP address either by an
# attribute set on the Collins asset or the pre-configured value.
if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
try:
ip_index = int(ip_index)
except Exception:
self.log.error(
"ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
ip_index)
else:
ip_index = self.ip_address_index
asset['COLLINS'] = {}
# Attempts to locate the asset's primary identifier (hostname or IP address),
# which will be used to index the asset throughout the Ansible inventory.
if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
elif 'ADDRESSES' not in asset:
self.log.warning("No IP addresses found for asset '%s', skipping", asset)
continue
elif len(asset['ADDRESSES']) < ip_index + 1:
self.log.warning(
"No IP address found at index %s for asset '%s', skipping",
ip_index, asset)
continue
else:
asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']
# Adds an asset index to the Ansible inventory based upon unpacking
# the name of the asset's current STATE from its dictionary.
if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
state_inventory_key = self.to_safe(
'STATE-%s' % asset['ASSET']['STATE']['NAME'])
self.push(self.inventory, state_inventory_key, asset_identifier)
# Indexes asset by all user-defined Collins attributes.
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
for attrib in asset['ATTRIBS'][attrib_block].keys():
asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
self.push(self.inventory, attrib_key, asset_identifier)
# Indexes asset by all built-in Collins attributes.
for attribute in asset['ASSET'].keys():
if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
attribute_val = asset['ASSET'][attribute]
if attribute_val is not None:
attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
self.push(self.inventory, attrib_key, asset_identifier)
# Indexes asset by hardware product information.
if 'HARDWARE' in asset:
if 'PRODUCT' in asset['HARDWARE']['BASE']:
product = asset['HARDWARE']['BASE']['PRODUCT']
if product:
product_key = self.to_safe(
'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
self.push(self.inventory, product_key, asset_identifier)
# Indexing now complete, adds the host details to the asset cache.
self.cache[asset_identifier] = asset
try:
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
except Exception:
self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
return False
return True
def push(self, dictionary, key, value):
""" Adds a value to a list at a dictionary key, creating the list if it doesn't
exist. """
if key not in dictionary:
dictionary[key] = []
dictionary[key].append(value)
def get_host_info(self):
""" Get variables about a specific host. """
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return self.json_format_dict({}, self.args.pretty)
return self.json_format_dict(self.cache[self.args.host], self.args.pretty)
def load_inventory_from_cache(self):
""" Reads the index from the cache file sets self.index """
try:
cache = open(self.cache_path_inventory, 'r')
json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
return True
except Exception:
self.log.error("Error while loading inventory:\n%s",
traceback.format_exc())
self.inventory = {}
return False
def load_cache_from_cache(self):
""" Reads the cache from the cache file sets self.cache """
try:
cache = open(self.cache_path_cache, 'r')
json_cache = cache.read()
self.cache = json.loads(json_cache)
return True
except Exception:
self.log.error("Error while loading host cache:\n%s",
traceback.format_exc())
self.cache = {}
return False
def write_to_cache(self, data, filename):
""" Writes data in JSON format to a specified file. """
json_data = self.json_format_dict(data, self.args.pretty)
        with open(filename, 'w') as cache:
            cache.write(json_data)
def to_safe(self, word):
""" Converts 'bad' characters in a string to underscores so they
can be used as Ansible groups """
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
if __name__ == '__main__':
inventory = CollinsInventory()
if inventory.run():
sys.exit(0)
else:
sys.exit(-1)

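update_cache() above derives group names by joining an attribute name and its value and passing the result through to_safe(); a small self-contained sketch of that naming, using a hypothetical asset fragment shaped like the Collins API responses handled above:

```python
import re


def to_safe(word):
    return re.sub(r"[^A-Za-z0-9\-]", "_", word)


# Hypothetical asset fragment shaped like a Collins API response
asset = {'ASSET': {'STATUS': 'Allocated', 'TAG': 'web-0001'},
         'ATTRIBS': {'0': {'PRIMARY_ROLE': 'webserver'}}}

groups = []
for attrib_block in asset['ATTRIBS'].values():
    for attrib, value in attrib_block.items():
        groups.append(to_safe('%s-%s' % (attrib, value)))
for attribute, value in asset['ASSET'].items():
    groups.append(to_safe('%s-%s' % (attribute, value)))
print(sorted(groups))
# ['PRIMARY_ROLE-webserver', 'STATUS-Allocated', 'TAG-web-0001']
```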
@ -1,54 +0,0 @@
# Ansible Consul external inventory script settings.
[consul]
#
# Bulk load. Load all possible data before building inventory JSON
# If true, the script processes in-memory data and JSON generation time drops drastically
#
bulk_load = false
# restrict included nodes to those from this datacenter
#datacenter = nyc1
# url of the consul cluster to query
#url = http://demo.consul.io
url = http://localhost:8500
# suffix added to each service to create a group name e.g. a service of 'redis' and
# a suffix of '_servers' will add each address to the group name 'redis_servers'
servers_suffix = _servers
#
# By default, final JSON is built based on all available info in consul.
# Suffixes mean that service groups will be added in addition to basic information. See servers_suffix for additional info
# There are cases when speed is preferable to having service groups
# A false value will reduce script execution time drastically.
#
suffixes = true
# if specified then the inventory will generate domain names that will resolve
# via Consul's inbuilt DNS.
#domain=consul
# make groups from service tags. the name of the group is derived from the
# service name and the tag name e.g. a service named nginx with tags ['master', 'v1']
# will create groups nginx_master and nginx_v1
tags = true
# looks up the node name at the given path for a list of groups to which the
# node should be added.
kv_groups=ansible/groups
# looks up the node name at the given path for a json dictionary of metadata that
# should be attached as metadata for the node
kv_metadata=ansible/metadata
# looks up the health of each service and adds the node to 'up' and 'down' groups
# based on the service availability
#
# !!!! if availability is true, suffixes also must be true. !!!!
#
availability = true
available_suffix = _up
unavailable_suffix = _down

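The kv_groups and kv_metadata prefixes are combined with the datacenter and node name to form the keys the script queries. A sketch of seeding those keys with python-consul (the same client the script imports; the node and datacenter names are hypothetical):

```python
import consul  # python-consul

c = consul.Consul()  # assumes a local agent on localhost:8500

# With kv_groups=ansible/groups and kv_metadata=ansible/metadata as above, the
# script looks up keys of the form <prefix>/<datacenter>/<node_name>
c.kv.put('ansible/groups/nyc-dc1/nyc-web-1', 'test,honeypot')
c.kv.put('ansible/metadata/nyc-dc1/nyc-web-1', '{"database": "postgres"}')

index, entry = c.kv.get('ansible/groups/nyc-dc1/nyc-web-1')
print(entry['Value'])  # b'test,honeypot' -> the node joins groups 'test' and 'honeypot'
```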
@ -1,537 +0,0 @@
#!/usr/bin/env python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Consul.io inventory script (http://consul.io)
=============================================
Generates Ansible inventory from nodes in a Consul cluster. This script will
group nodes by:
- datacenter,
- registered service
- service tags
- service status
- values from the k/v store
This script can be run with the following switches:
--list which, as expected, groups all the nodes in all datacenters
--datacenter, to restrict the nodes to a single datacenter
--host to restrict the inventory to a single named node (requires datacenter config)
The configuration for this plugin is read from a consul_io.ini file located in the
same directory as this inventory script. All config options in the config file
are optional except the host and port, which must point to a valid agent or
server running the HTTP API. For more information on enabling the endpoint, see:
http://www.consul.io/docs/agent/options.html
Other options include:
'datacenter':
which restricts the included nodes to those from the given datacenter
This can also be set with the environmental variable CONSUL_DATACENTER
'url':
the URL of the Consul cluster. host, port and scheme are derived from the
URL. If not specified, connection configuration defaults to http requests
to localhost on port 8500.
This can also be set with the environmental variable CONSUL_URL
'domain':
if specified then the inventory will generate domain names that will resolve
via Consul's inbuilt DNS. The name is derived from the node name, datacenter
and domain <node_name>.node.<datacenter>.<domain>. Note that you will need to
have consul hooked into your DNS server for these to resolve. See the consul
DNS docs for more info.
'servers_suffix':
defines a suffix to add to the service name when creating the service
group. e.g. a service name of 'redis' and a suffix of '_servers' will add
each node's address to the group name 'redis_servers'. No suffix is added
if this is not set
'tags':
boolean flag defining if service tags should be used to create Inventory
groups e.g. an nginx service with the tags ['master', 'v1'] will create
groups nginx_master and nginx_v1 to which the node running the service
will be added. No tag groups are created if this is missing.
'token':
ACL token to use to authorize access to the key value store. May be required
to retrieve the kv_groups and kv_metadata based on your consul configuration.
'kv_groups':
This is used to lookup groups for a node in the key value store. It specifies a
path to which each discovered node's name will be added to create a key to query
the key/value store. There it expects to find a comma separated list of group
names to which the node should be added e.g. if the inventory contains node
'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key
'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query
returned 'test,honeypot' then the node's address would be added to both groups.
'kv_metadata':
kv_metadata is used to lookup metadata for each discovered node. Like kv_groups
above it is used to build a path to lookup in the kv store where it expects to
find a json dictionary of metadata entries. If found, each key/value pair in the
dictionary is added to the metadata for the node. e.g. node 'nyc-web-1' in datacenter
'nyc-dc1' and kv_metadata = 'ansible/metadata', then the key
'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"database": "postgres"}'
'availability':
if true then availability groups will be created for each service. The node will
be added to one of the groups based on the health status of the service. The
group name is derived from the service name and the configurable availability
suffixes
'available_suffix':
suffix that should be appended to the service availability groups for available
services e.g. if the suffix is '_up' and the service is nginx, then nodes with
healthy nginx services will be added to the nginx_up group. Defaults to
'_available'
'unavailable_suffix':
as above but for unhealthy services, defaults to '_unavailable'
Note that if the inventory discovers an 'ssh' service running on a node it will
register the port as ansible_ssh_port in the node's metadata and this port will
be used to access the machine.
'''
import os
import re
import argparse
import sys
from ansible.module_utils.six.moves import configparser
def get_log_filename():
tty_filename = '/dev/tty'
stdout_filename = '/dev/stdout'
if not os.path.exists(tty_filename):
return stdout_filename
if not os.access(tty_filename, os.W_OK):
return stdout_filename
if os.getenv('TEAMCITY_VERSION'):
return stdout_filename
return tty_filename
def setup_logging():
filename = get_log_filename()
import logging.config
logging.config.dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
},
},
'root': {
'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'),
'handlers': ['console'],
},
'handlers': {
'console': {
'class': 'logging.FileHandler',
'filename': filename,
'formatter': 'simple',
},
},
'loggers': {
'iso8601': {
'qualname': 'iso8601',
'level': 'INFO',
},
},
})
logger = logging.getLogger('consul_io.py')
logger.debug('Invoked with %r', sys.argv)
if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'):
setup_logging()
import json
try:
import consul
except ImportError as e:
sys.exit("""failed=True msg='python-consul required for this module.
See https://python-consul.readthedocs.io/en/latest/#installation'""")
from ansible.module_utils.six import iteritems
class ConsulInventory(object):
def __init__(self):
''' Create an inventory based on the catalog of nodes and services
registered in a consul cluster'''
self.node_metadata = {}
self.nodes = {}
self.nodes_by_service = {}
self.nodes_by_tag = {}
self.nodes_by_datacenter = {}
self.nodes_by_kv = {}
self.nodes_by_availability = {}
self.current_dc = None
self.inmemory_kv = []
self.inmemory_nodes = []
config = ConsulConfig()
self.config = config
self.consul_api = config.get_consul_api()
if config.has_config('datacenter'):
if config.has_config('host'):
self.load_data_for_node(config.host, config.datacenter)
else:
self.load_data_for_datacenter(config.datacenter)
else:
self.load_all_data_consul()
self.combine_all_results()
print(json.dumps(self.inventory, sort_keys=True, indent=2))
def bulk_load(self, datacenter):
index, groups_list = self.consul_api.kv.get(self.config.kv_groups, recurse=True, dc=datacenter)
index, metadata_list = self.consul_api.kv.get(self.config.kv_metadata, recurse=True, dc=datacenter)
index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
        # kv.get returns None when the key prefix is missing; guard before concatenating
        if groups_list:
            self.inmemory_kv += groups_list
        if metadata_list:
            self.inmemory_kv += metadata_list
self.inmemory_nodes += nodes
def load_all_data_consul(self):
''' cycle through each of the datacenters in the consul catalog and process
the nodes in each '''
self.datacenters = self.consul_api.catalog.datacenters()
for datacenter in self.datacenters:
self.current_dc = datacenter
self.bulk_load(datacenter)
self.load_data_for_datacenter(datacenter)
def load_availability_groups(self, node, datacenter):
'''check the health of each service on a node and add the node to either
an 'available' or 'unavailable' grouping. The suffix for each group can be
controlled from the config'''
if self.config.has_config('availability'):
for service_name, service in iteritems(node['Services']):
for node in self.consul_api.health.service(service_name)[1]:
if self.is_service_available(node, service_name):
suffix = self.config.get_availability_suffix(
'available_suffix', '_available')
else:
suffix = self.config.get_availability_suffix(
'unavailable_suffix', '_unavailable')
self.add_node_to_map(self.nodes_by_availability,
service_name + suffix, node['Node'])
def is_service_available(self, node, service_name):
'''check the availability of the service on the node beside ensuring the
availability of the node itself'''
consul_ok = service_ok = False
for check in node['Checks']:
if check['CheckID'] == 'serfHealth':
consul_ok = check['Status'] == 'passing'
elif check['ServiceName'] == service_name:
service_ok = check['Status'] == 'passing'
return consul_ok and service_ok
    def consul_get_kv_inmemory(self, key):
        # A list comprehension rather than filter(): on Python 3, filter()
        # returns an iterator that is always truthy and has no pop()
        result = [x for x in self.inmemory_kv if x['Key'] == key]
        return result.pop() if result else None
    def consul_get_node_inmemory(self, node):
        result = [x for x in self.inmemory_nodes if x['Node'] == node]
        return {"Node": result.pop(), "Services": {}} if result else None
def load_data_for_datacenter(self, datacenter):
'''processes all the nodes in a particular datacenter'''
if self.config.bulk_load == 'true':
nodes = self.inmemory_nodes
else:
index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
for node in nodes:
self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
self.load_data_for_node(node['Node'], datacenter)
def load_data_for_node(self, node, datacenter):
'''loads the data for a single node adding it to various groups based on
metadata retrieved from the kv store and service availability'''
if self.config.suffixes == 'true':
index, node_data = self.consul_api.catalog.node(node, dc=datacenter)
else:
node_data = self.consul_get_node_inmemory(node)
node = node_data['Node']
self.add_node_to_map(self.nodes, 'all', node)
self.add_metadata(node_data, "consul_datacenter", datacenter)
self.add_metadata(node_data, "consul_nodename", node['Node'])
self.load_groups_from_kv(node_data)
self.load_node_metadata_from_kv(node_data)
if self.config.suffixes == 'true':
self.load_availability_groups(node_data, datacenter)
for name, service in node_data['Services'].items():
self.load_data_from_service(name, service, node_data)
def load_node_metadata_from_kv(self, node_data):
''' load the json dict at the metadata path defined by the kv_metadata value
and the node name add each entry in the dictionary to the node's
metadata '''
node = node_data['Node']
if self.config.has_config('kv_metadata'):
key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
if self.config.bulk_load == 'true':
metadata = self.consul_get_kv_inmemory(key)
else:
index, metadata = self.consul_api.kv.get(key)
if metadata and metadata['Value']:
try:
metadata = json.loads(metadata['Value'])
for k, v in metadata.items():
self.add_metadata(node_data, k, v)
except Exception:
pass
def load_groups_from_kv(self, node_data):
''' load the comma separated list of groups at the path defined by the
kv_groups config value and the node name add the node address to each
group found '''
node = node_data['Node']
if self.config.has_config('kv_groups'):
key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
if self.config.bulk_load == 'true':
groups = self.consul_get_kv_inmemory(key)
else:
index, groups = self.consul_api.kv.get(key)
if groups and groups['Value']:
for group in groups['Value'].split(','):
self.add_node_to_map(self.nodes_by_kv, group.strip(), node)
def load_data_from_service(self, service_name, service, node_data):
'''process a service registered on a node, adding the node to a group with
the service name. Each service tag is extracted and the node is added to a
tag grouping also'''
self.add_metadata(node_data, "consul_services", service_name, True)
if self.is_service("ssh", service_name):
self.add_metadata(node_data, "ansible_ssh_port", service['Port'])
if self.config.has_config('servers_suffix'):
service_name = service_name + self.config.servers_suffix
self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node'])
self.extract_groups_from_tags(service_name, service, node_data)
def is_service(self, target, name):
return name and (name.lower() == target.lower())
def extract_groups_from_tags(self, service_name, service, node_data):
'''iterates each service tag and adds the node to groups derived from the
service and tag names e.g. nginx_master'''
if self.config.has_config('tags') and service['Tags']:
tags = service['Tags']
self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
for tag in service['Tags']:
tagname = service_name + '_' + tag
self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
def combine_all_results(self):
'''prunes and sorts all groupings for combination into the final map'''
self.inventory = {"_meta": {"hostvars": self.node_metadata}}
groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
for grouping in groupings:
for name, addresses in grouping.items():
self.inventory[name] = sorted(list(set(addresses)))
def add_metadata(self, node_data, key, value, is_list=False):
        ''' Pushes an element onto a metadata dict for the node, creating
the dict if it doesn't exist '''
key = self.to_safe(key)
node = self.get_inventory_name(node_data['Node'])
if node in self.node_metadata:
metadata = self.node_metadata[node]
else:
metadata = {}
self.node_metadata[node] = metadata
if is_list:
self.push(metadata, key, value)
else:
metadata[key] = value
def get_inventory_name(self, node_data):
'''return the ip or a node name that can be looked up in consul's dns'''
domain = self.config.domain
if domain:
node_name = node_data['Node']
if self.current_dc:
return '%s.node.%s.%s' % (node_name, self.current_dc, domain)
else:
return '%s.node.%s' % (node_name, domain)
else:
return node_data['Address']
def add_node_to_map(self, map, name, node):
self.push(map, name, self.get_inventory_name(node))
def push(self, my_dict, key, element):
        ''' Pushes an element onto a list that may not have been defined in the
dict '''
key = self.to_safe(key)
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used
as Ansible groups '''
return re.sub(r'[^A-Za-z0-9\-\.]', '_', word)
def sanitize_dict(self, d):
new_dict = {}
for k, v in d.items():
if v is not None:
new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
return new_dict
def sanitize_list(self, seq):
new_seq = []
for d in seq:
new_seq.append(self.sanitize_dict(d))
return new_seq
class ConsulConfig(dict):
def __init__(self):
self.read_settings()
self.read_cli_args()
self.read_env_vars()
def has_config(self, name):
if hasattr(self, name):
return getattr(self, name)
else:
return False
def read_settings(self):
''' Reads the settings from the consul_io.ini file (or consul.ini for backwards compatibility)'''
config = configparser.SafeConfigParser()
if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini'):
config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini')
else:
config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini')
config_options = ['host', 'token', 'datacenter', 'servers_suffix',
'tags', 'kv_metadata', 'kv_groups', 'availability',
'unavailable_suffix', 'available_suffix', 'url',
'domain', 'suffixes', 'bulk_load']
for option in config_options:
value = None
if config.has_option('consul', option):
value = config.get('consul', option).lower()
setattr(self, option, value)
def read_cli_args(self):
''' Command line argument processing '''
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on nodes in a Consul cluster')
parser.add_argument('--list', action='store_true',
help='Get all inventory variables from all nodes in the consul cluster')
parser.add_argument('--host', action='store',
                            help='Get all inventory variables about a specific consul node, '
                                 'requires datacenter set in consul.ini.')
parser.add_argument('--datacenter', action='store',
help='Get all inventory about a specific consul datacenter')
args = parser.parse_args()
arg_names = ['host', 'datacenter']
for arg in arg_names:
if getattr(args, arg):
setattr(self, arg, getattr(args, arg))
def read_env_vars(self):
env_var_options = ['datacenter', 'url']
for option in env_var_options:
value = None
env_var = 'CONSUL_' + option.upper()
if os.environ.get(env_var):
setattr(self, option, os.environ.get(env_var))
def get_availability_suffix(self, suffix, default):
if self.has_config(suffix):
return self.has_config(suffix)
return default
def get_consul_api(self):
'''get an instance of the api based on the supplied configuration'''
host = 'localhost'
port = 8500
token = None
scheme = 'http'
if hasattr(self, 'url'):
from ansible.module_utils.six.moves.urllib.parse import urlparse
o = urlparse(self.url)
if o.hostname:
host = o.hostname
if o.port:
port = o.port
if o.scheme:
scheme = o.scheme
if hasattr(self, 'token'):
token = self.token
if not token:
token = 'anonymous'
return consul.Consul(host=host, port=port, token=token, scheme=scheme)
ConsulInventory()

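The availability grouping above hinges on two health checks per node: the agent-level serfHealth check and the check registered for the service itself. A self-contained sketch of that test, using a hypothetical health payload shaped like consul_api.health.service() output:

```python
def is_service_available(node, service_name):
    # Usable only if both the agent (serfHealth) and the service's own
    # check report 'passing'
    consul_ok = service_ok = False
    for check in node['Checks']:
        if check['CheckID'] == 'serfHealth':
            consul_ok = check['Status'] == 'passing'
        elif check['ServiceName'] == service_name:
            service_ok = check['Status'] == 'passing'
    return consul_ok and service_ok


node = {'Checks': [
    {'CheckID': 'serfHealth', 'ServiceName': '', 'Status': 'passing'},
    {'CheckID': 'service:nginx', 'ServiceName': 'nginx', 'Status': 'critical'},
]}
print(is_service_available(node, 'nginx'))  # False -> lands in the nginx_down group
```

With availability = true and the suffixes configured above, a passing service would land in nginx_up instead.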
@ -1,34 +0,0 @@
# Ansible DigitalOcean external inventory script settings
#
[digital_ocean]
# The module needs your DigitalOcean API Token.
# It may also be specified on the command line via --api-token
# or via the environment variables DO_API_TOKEN or DO_API_KEY
#
#api_token = 123456abcdefg
# API calls to DigitalOcean may be slow. For this reason, we cache the results
# of an API call. Set this to the path you want cache files to be written to.
# One file will be written to this directory:
# - ansible-digital_ocean.cache
#
cache_path = /tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
#
cache_max_age = 300
# Use the private network IP address instead of the public when available.
#
use_private_network = False
# Pass variables to every group, e.g.:
#
# group_variables = { 'ansible_user': 'root' }
#
group_variables = {}

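Note that group_variables is a Python dict literal rather than plain INI text; the script below parses it with ast.literal_eval, roughly like this:

```python
import ast

raw = "{ 'ansible_user': 'root' }"  # as it would appear in digital_ocean.ini
group_variables = ast.literal_eval(raw)
print(group_variables['ansible_user'])  # root
```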
@ -1,551 +0,0 @@
#!/usr/bin/env python
"""
DigitalOcean external inventory script
======================================
Generates Ansible inventory of DigitalOcean Droplets.
In addition to the --list and --host options used by Ansible, there are options
for generating JSON of other DigitalOcean data. This is useful when creating
droplets. For example, --regions will return all the DigitalOcean Regions.
This information can also be easily found in the cache file, whose default
location is /tmp/ansible-digital_ocean.cache.
The --pretty (-p) option pretty-prints the output for better human readability.
----
Although the cache stores all the information received from DigitalOcean,
the cache is not used for current droplet information (in --list, --host,
--all, and --droplets). This is so that accurate droplet information is always
found. You can force this script to use the cache with --force-cache.
----
Configuration is read from `digital_ocean.ini`, then from environment variables,
and then from command-line arguments.
Most notably, the DigitalOcean API Token must be specified. It can be specified
in the INI file or with the following environment variables:
export DO_API_TOKEN='abc123' or
export DO_API_KEY='abc123'
Alternatively, it can be passed on the command-line with --api-token.
If you specify DigitalOcean credentials in the INI file, a handy way to
get them into your environment (e.g., to use the digital_ocean module)
is to use the output of the --env option with export:
export $(digital_ocean.py --env)
----
The following groups are generated from --list:
- ID (droplet ID)
- NAME (droplet NAME)
- digital_ocean
- image_ID
- image_NAME
- distro_NAME (distribution NAME from image)
- region_NAME
- size_NAME
- status_STATUS
For each host, the following variables are registered:
- do_backup_ids
- do_created_at
- do_disk
- do_features - list
- do_id
- do_image - object
- do_ip_address
- do_private_ip_address
- do_kernel - object
- do_locked
- do_memory
- do_name
- do_networks - object
- do_next_backup_window
- do_region - object
- do_size - object
- do_size_slug
- do_snapshot_ids - list
- do_status
- do_tags
- do_vcpus
- do_volume_ids
-----
```
usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets]
[--regions] [--images] [--sizes] [--ssh-keys]
[--domains] [--tags] [--pretty]
[--cache-path CACHE_PATH]
[--cache-max_age CACHE_MAX_AGE] [--force-cache]
[--refresh-cache] [--env] [--api-token API_TOKEN]
Produce an Ansible Inventory file based on DigitalOcean credentials
optional arguments:
-h, --help show this help message and exit
--list List all active Droplets as Ansible inventory
(default: True)
--host HOST Get all Ansible inventory variables about a specific
Droplet
--all List all DigitalOcean information as JSON
--droplets, -d List Droplets as JSON
--regions List Regions as JSON
--images List Images as JSON
--sizes List Sizes as JSON
--ssh-keys List SSH keys as JSON
--domains List Domains as JSON
--tags List Tags as JSON
--pretty, -p Pretty-print results
--cache-path CACHE_PATH
Path to the cache files (default: .)
--cache-max_age CACHE_MAX_AGE
Maximum age of the cached items (default: 0)
--force-cache Only use data from the cache
--refresh-cache, -r Force refresh of cache by making API requests to
DigitalOcean (default: False - use cache files)
--env, -e Display DO_API_TOKEN
--api-token API_TOKEN, -a API_TOKEN
DigitalOcean API Token
```
"""
# (c) 2013, Evan Wies <evan@neomantra.net>
# (c) 2017, Ansible Project
# (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import argparse
import ast
import os
import re
import requests
import sys
from time import time
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import json
class DoManager:
def __init__(self, api_token):
self.api_token = api_token
self.api_endpoint = 'https://api.digitalocean.com/v2'
self.headers = {'Authorization': 'Bearer {0}'.format(self.api_token),
'Content-type': 'application/json'}
self.timeout = 60
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.api_endpoint, path)
def send(self, url, method='GET', data=None):
url = self._url_builder(url)
data = json.dumps(data)
try:
if method == 'GET':
resp_data = {}
incomplete = True
while incomplete:
resp = requests.get(url, data=data, headers=self.headers, timeout=self.timeout)
json_resp = resp.json()
for key, value in json_resp.items():
if isinstance(value, list) and key in resp_data:
resp_data[key] += value
else:
resp_data[key] = value
try:
url = json_resp['links']['pages']['next']
except KeyError:
incomplete = False
except ValueError as e:
sys.exit("Unable to parse result from %s: %s" % (url, e))
return resp_data
def all_active_droplets(self):
resp = self.send('droplets/')
return resp['droplets']
def all_regions(self):
resp = self.send('regions/')
return resp['regions']
def all_images(self, filter_name='global'):
params = {'filter': filter_name}
resp = self.send('images/', data=params)
return resp['images']
def sizes(self):
resp = self.send('sizes/')
return resp['sizes']
def all_ssh_keys(self):
resp = self.send('account/keys')
return resp['ssh_keys']
def all_domains(self):
resp = self.send('domains/')
return resp['domains']
def show_droplet(self, droplet_id):
resp = self.send('droplets/%s' % droplet_id)
return resp['droplet']
def all_tags(self):
resp = self.send('tags')
return resp['tags']
class DigitalOceanInventory(object):
###########################################################################
# Main execution path
###########################################################################
def __init__(self):
"""Main execution path """
# DigitalOceanInventory data
self.data = {} # All DigitalOcean data
self.inventory = {} # Ansible Inventory
# Define defaults
self.cache_path = '.'
self.cache_max_age = 0
self.use_private_network = False
self.group_variables = {}
# Read settings, environment variables, and CLI arguments
self.read_settings()
self.read_environment()
self.read_cli_args()
# Verify credentials were set
if not hasattr(self, 'api_token'):
msg = 'Could not find values for DigitalOcean api_token. They must be specified via either ini file, ' \
'command line argument (--api-token), or environment variables (DO_API_TOKEN)\n'
sys.stderr.write(msg)
sys.exit(-1)
# env command, show DigitalOcean credentials
if self.args.env:
print("DO_API_TOKEN=%s" % self.api_token)
sys.exit(0)
# Manage cache
self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
self.cache_refreshed = False
if self.is_cache_valid():
self.load_from_cache()
if len(self.data) == 0:
if self.args.force_cache:
sys.stderr.write('Cache is empty and --force-cache was specified\n')
sys.exit(-1)
self.manager = DoManager(self.api_token)
# Pick the json_data to print based on the CLI command
if self.args.droplets:
self.load_from_digital_ocean('droplets')
json_data = {'droplets': self.data['droplets']}
elif self.args.regions:
self.load_from_digital_ocean('regions')
json_data = {'regions': self.data['regions']}
elif self.args.images:
self.load_from_digital_ocean('images')
json_data = {'images': self.data['images']}
elif self.args.sizes:
self.load_from_digital_ocean('sizes')
json_data = {'sizes': self.data['sizes']}
elif self.args.ssh_keys:
self.load_from_digital_ocean('ssh_keys')
json_data = {'ssh_keys': self.data['ssh_keys']}
elif self.args.domains:
self.load_from_digital_ocean('domains')
json_data = {'domains': self.data['domains']}
elif self.args.tags:
self.load_from_digital_ocean('tags')
json_data = {'tags': self.data['tags']}
elif self.args.all:
self.load_from_digital_ocean()
json_data = self.data
elif self.args.host:
json_data = self.load_droplet_variables_for_host()
else: # '--list' this is last to make it default
self.load_from_digital_ocean('droplets')
self.build_inventory()
json_data = self.inventory
if self.cache_refreshed:
self.write_to_cache()
if self.args.pretty:
print(json.dumps(json_data, indent=2))
else:
print(json.dumps(json_data))
###########################################################################
# Script configuration
###########################################################################
def read_settings(self):
""" Reads the settings from the digital_ocean.ini file """
config = ConfigParser.ConfigParser()
config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini')
config.read(config_path)
# Credentials
if config.has_option('digital_ocean', 'api_token'):
self.api_token = config.get('digital_ocean', 'api_token')
# Cache related
if config.has_option('digital_ocean', 'cache_path'):
self.cache_path = config.get('digital_ocean', 'cache_path')
if config.has_option('digital_ocean', 'cache_max_age'):
self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')
# Private IP Address
if config.has_option('digital_ocean', 'use_private_network'):
self.use_private_network = config.getboolean('digital_ocean', 'use_private_network')
# Group variables
if config.has_option('digital_ocean', 'group_variables'):
self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables'))
def read_environment(self):
""" Reads the settings from environment variables """
# Setup credentials
if os.getenv("DO_API_TOKEN"):
self.api_token = os.getenv("DO_API_TOKEN")
if os.getenv("DO_API_KEY"):
self.api_token = os.getenv("DO_API_KEY")
def read_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')
parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')
parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON')
parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
parser.add_argument('--images', action='store_true', help='List Images as JSON')
parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
parser.add_argument('--domains', action='store_true', help='List Domains as JSON')
parser.add_argument('--tags', action='store_true', help='List Tags as JSON')
parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results')
parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
parser.add_argument('--refresh-cache', '-r', action='store_true', default=False,
help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN')
parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token')
self.args = parser.parse_args()
if self.args.api_token:
self.api_token = self.args.api_token
# Make --list default if none of the other commands are specified
if (not self.args.droplets and not self.args.regions and
not self.args.images and not self.args.sizes and
not self.args.ssh_keys and not self.args.domains and
not self.args.tags and
not self.args.all and not self.args.host):
self.args.list = True
###########################################################################
# Data Management
###########################################################################
def load_from_digital_ocean(self, resource=None):
"""Get JSON from DigitalOcean API """
if self.args.force_cache and os.path.isfile(self.cache_filename):
return
# We always get fresh droplets
if self.is_cache_valid() and not (resource == 'droplets' or resource is None):
return
if self.args.refresh_cache:
resource = None
if resource == 'droplets' or resource is None:
self.data['droplets'] = self.manager.all_active_droplets()
self.cache_refreshed = True
if resource == 'regions' or resource is None:
self.data['regions'] = self.manager.all_regions()
self.cache_refreshed = True
if resource == 'images' or resource is None:
self.data['images'] = self.manager.all_images()
self.cache_refreshed = True
if resource == 'sizes' or resource is None:
self.data['sizes'] = self.manager.sizes()
self.cache_refreshed = True
if resource == 'ssh_keys' or resource is None:
self.data['ssh_keys'] = self.manager.all_ssh_keys()
self.cache_refreshed = True
if resource == 'domains' or resource is None:
self.data['domains'] = self.manager.all_domains()
self.cache_refreshed = True
if resource == 'tags' or resource is None:
self.data['tags'] = self.manager.all_tags()
self.cache_refreshed = True
def add_inventory_group(self, key):
""" Method to create group dict """
host_dict = {'hosts': [], 'vars': {}}
self.inventory[key] = host_dict
return
def add_host(self, group, host):
""" Helper method to reduce host duplication """
if group not in self.inventory:
self.add_inventory_group(group)
if host not in self.inventory[group]['hosts']:
self.inventory[group]['hosts'].append(host)
return
def build_inventory(self):
""" Build Ansible inventory of droplets """
self.inventory = {
'all': {
'hosts': [],
'vars': self.group_variables
},
'_meta': {'hostvars': {}}
}
        # add all droplets by id and name
        for droplet in self.data['droplets']:
            # honor use_private_network and skip droplets without a matching
            # IPv4 address, so a previous droplet's address is never reused
            wanted_type = 'private' if self.use_private_network else 'public'
            dest = None
            for net in droplet['networks']['v4']:
                if net['type'] == wanted_type:
                    dest = net['ip_address']
                    break
            if dest is None:
                continue
self.inventory['all']['hosts'].append(dest)
self.add_host(droplet['id'], dest)
self.add_host(droplet['name'], dest)
# groups that are always present
for group in ('digital_ocean',
'region_' + droplet['region']['slug'],
'image_' + str(droplet['image']['id']),
'size_' + droplet['size']['slug'],
'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),
'status_' + droplet['status']):
self.add_host(group, dest)
# groups that are not always present
for group in (droplet['image']['slug'],
droplet['image']['name']):
if group:
image = 'image_' + DigitalOceanInventory.to_safe(group)
self.add_host(image, dest)
if droplet['tags']:
for tag in droplet['tags']:
self.add_host(tag, dest)
# hostvars
info = self.do_namespace(droplet)
self.inventory['_meta']['hostvars'][dest] = info
def load_droplet_variables_for_host(self):
""" Generate a JSON response to a --host call """
host = int(self.args.host)
droplet = self.manager.show_droplet(host)
info = self.do_namespace(droplet)
return {'droplet': info}
###########################################################################
# Cache Management
###########################################################################
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_filename):
mod_time = os.path.getmtime(self.cache_filename)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
return True
return False
def load_from_cache(self):
""" Reads the data from the cache file and assigns it to member variables as Python Objects """
try:
with open(self.cache_filename, 'r') as cache:
json_data = cache.read()
data = json.loads(json_data)
except IOError:
data = {'data': {}, 'inventory': {}}
self.data = data['data']
self.inventory = data['inventory']
def write_to_cache(self):
""" Writes data in JSON format to a file """
data = {'data': self.data, 'inventory': self.inventory}
json_data = json.dumps(data, indent=2)
with open(self.cache_filename, 'w') as cache:
cache.write(json_data)
###########################################################################
# Utilities
###########################################################################
@staticmethod
def to_safe(word):
""" Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
return re.sub(r"[^A-Za-z0-9\-.]", "_", word)
@staticmethod
def do_namespace(data):
""" Returns a copy of the dictionary with all the keys put in a 'do_' namespace """
info = {}
for k, v in data.items():
info['do_' + k] = v
return info
###########################################################################
# Run the script
DigitalOceanInventory()

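build_inventory() above turns each droplet into a handful of group memberships; a standalone sketch of the names it produces, using a hypothetical droplet fragment shaped like the DigitalOcean v2 API response:

```python
import re


def to_safe(word):
    return re.sub(r"[^A-Za-z0-9\-.]", "_", word)


droplet = {'region': {'slug': 'nyc3'},
           'image': {'id': 6918990, 'distribution': 'Ubuntu 16.04'},
           'size': {'slug': 's-1vcpu-1gb'},
           'status': 'active'}

groups = ['digital_ocean',
          'region_' + droplet['region']['slug'],
          'image_' + str(droplet['image']['id']),
          'size_' + droplet['size']['slug'],
          'distro_' + to_safe(droplet['image']['distribution']),
          'status_' + droplet['status']]
print(groups)
# ['digital_ocean', 'region_nyc3', 'image_6918990', 'size_s-1vcpu-1gb',
#  'distro_Ubuntu_16.04', 'status_active']
```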
@ -1,905 +0,0 @@
#!/usr/bin/env python
#
# (c) 2016 Paul Durivage <paul.durivage@gmail.com>
# Chris Houseknecht <house@redhat.com>
# James Tanner <jtanner@redhat.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
Docker Inventory Script
=======================
The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
because the inventory is generated at run-time rather than being read from a static file. The script generates the
inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
script contacts can be defined using environment variables or a configuration file.
Requirements
------------
Using this inventory script requires having docker-py <https://docker-py.readthedocs.io/en/stable/>
installed on the host running Ansible. To install docker-py:
pip install docker-py
Run for Specific Host
---------------------
When run for a specific container using the --host option this script returns the following hostvars:
{
"ansible_ssh_host": "",
"ansible_ssh_port": 0,
"docker_apparmorprofile": "",
"docker_args": [],
"docker_config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/hello"
],
"Domainname": "",
"Entrypoint": null,
"Env": null,
"Hostname": "9f2f80b0a702",
"Image": "hello-world",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": null,
"WorkingDir": ""
},
"docker_created": "2016-04-18T02:05:59.659599249Z",
"docker_driver": "aufs",
"docker_execdriver": "native-0.2",
"docker_execids": null,
"docker_graphdriver": {
"Data": null,
"Name": "aufs"
},
"docker_hostconfig": {
"Binds": null,
"BlkioWeight": 0,
"CapAdd": null,
"CapDrop": null,
"CgroupParent": "",
"ConsoleSize": [
0,
0
],
"ContainerIDFile": "",
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuShares": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": null,
"Dns": null,
"DnsOptions": null,
"DnsSearch": null,
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"KernelMemory": 0,
"Links": null,
"LogConfig": {
"Config": {},
"Type": "json-file"
},
"LxcConf": null,
"Memory": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"NetworkMode": "default",
"OomKillDisable": false,
"PidMode": "host",
"PortBindings": null,
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"RestartPolicy": {
"MaximumRetryCount": 0,
"Name": ""
},
"SecurityOpt": [
"label:disable"
],
"UTSMode": "",
"Ulimits": null,
"VolumeDriver": "",
"VolumesFrom": null
},
"docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname",
"docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts",
"docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14",
"docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7",
"docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log",
"docker_mountlabel": "",
"docker_mounts": [],
"docker_name": "/hello-world",
"docker_networksettings": {
"Bridge": "",
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"HairpinMode": false,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"MacAddress": "",
"Networks": {
"bridge": {
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": ""
}
},
"Ports": null,
"SandboxID": "",
"SandboxKey": "",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null
},
"docker_path": "/hello",
"docker_processlabel": "",
"docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf",
"docker_restartcount": 0,
"docker_short_id": "9f2f80b0a7023",
"docker_state": {
"Dead": false,
"Error": "",
"ExitCode": 0,
"FinishedAt": "2016-04-18T02:06:00.296619369Z",
"OOMKilled": false,
"Paused": false,
"Pid": 0,
"Restarting": false,
"Running": false,
"StartedAt": "2016-04-18T02:06:00.272065041Z",
"Status": "exited"
}
}
Groups
------
When run in --list mode (the default), container instances are grouped by:
- container id
- container name
- container short id
- image_name (image_<image name>)
- stack_name (stack_<stack name>)
- service_name (service_<service name>)
- docker_host
- running
- stopped
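An abbreviated, illustrative sketch of the --list output (names and values below are placeholders, hostvars elided):
{
    "docker_hosts": ["unix://var/run/docker.sock"],
    "image_hello-world": ["hello-world"],
    "stopped": ["hello-world"],
    "_meta": {
        "hostvars": {
            "hello-world": {}
        }
    }
}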
Configuration
-------------
You can control the behavior of the inventory script by passing arguments, defining environment variables, or
creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence
is command line args, then the docker.yml file and finally environment variables.
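As a minimal sketch (not the script's actual code; cli_args, file_config and env are hypothetical dicts), resolving
a single option with that precedence could look like:

    docker_host = (cli_args.get('docker_host')
                   or file_config.get('host')
                   or env.get('DOCKER_HOST')
                   or 'unix://var/run/docker.sock')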
Environment variables
.....................
To connect to a single Docker API the following variables can be defined in the environment to control the connection
options. These are the same environment variables used by the Docker modules.
DOCKER_HOST
The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
DOCKER_API_VERSION:
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
by docker-py.
DOCKER_TIMEOUT:
The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
DOCKER_TLS:
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
Defaults to False.
DOCKER_TLS_VERIFY:
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
Defaults to False.
DOCKER_TLS_HOSTNAME:
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
to localhost.
DOCKER_CERT_PATH:
Path to the directory containing the client certificate, client key and CA certificate.
DOCKER_SSL_VERSION:
Provide a valid SSL version number. The default value is determined by docker-py; at the time of this writing
it was 1.0.
In addition to the connection variables, there are a couple of variables used to control the execution and output of
the script:
DOCKER_CONFIG_FILE
Path to the configuration file. Defaults to ./docker.yml.
DOCKER_PRIVATE_SSH_PORT:
The private port (container port) on which SSH is listening for connections. Defaults to 22.
DOCKER_DEFAULT_IP:
The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
Configuration File
..................
Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
The default name of the file is derived from the name of the inventory script: the script looks for the
basename of the script (i.e. docker) with an extension of '.yml'.
You can override the default config file name by defining DOCKER_CONFIG_FILE in the environment.
Here's what you can define in the config file:
defaults
Defines a default connection. Defaults will be taken from this and applied to any values not provided
for a host defined in the hosts list.
hosts
If you wish to get inventory from more than one Docker host, define a hosts list.
For the default host and each host in the hosts list define the following attributes (a sketch showing how a host
entry is merged with defaults follows this list):
host:
description: The URL or Unix socket path used to connect to the Docker API.
required: yes
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
default: false
required: false
tls_verify:
description: Connect using TLS and verify the authenticity of the Docker host server.
default: false
required: false
cert_path:
description: Path to the client's TLS certificate file.
default: null
required: false
cacert_path:
description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
default: null
required: false
key_path:
description: Path to the client's TLS key file.
default: null
required: false
version:
description: The Docker API version.
required: false
default: will be supplied by the docker-py module.
timeout:
description: The amount of time in seconds to wait on an API response.
required: false
default: 60
default_ip:
description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
'0.0.0.0'.
required: false
default: 127.0.0.1
private_ssh_port:
description: The port containers use for SSH.
required: false
default: 22
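A minimal sketch (not the script's actual code) of how a host entry could be merged with defaults, assuming the
config file has already been located:

    import yaml

    with open('docker.yml') as f:
        config = yaml.safe_load(f)

    defaults = config.get('defaults', {})
    for host in config.get('hosts', [defaults]):
        # Each attribute resolves the host entry first, then defaults, then the built-in default.
        docker_host = host.get('host') or defaults.get('host') or 'unix://var/run/docker.sock'
        timeout = host.get('timeout') or defaults.get('timeout') or 60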
Examples
--------
# Connect to the Docker API on localhost port 4243 and format the JSON output
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
# Any container's ssh port exposed on 0.0.0.0 will be mapped to
# another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
# Simple playbook to invoke with the above example:
- name: Test docker_inventory
hosts: all
connection: local
gather_facts: no
tasks:
- debug: msg="Container - {{ inventory_hostname }}"
'''
import os
import sys
import json
import argparse
import re
import yaml
from collections import defaultdict
# Manipulation of the path is needed because the docker-py
# module is imported by the name docker, and because this file
# is also named docker
for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
try:
del sys.path[sys.path.index(path)]
except Exception:
pass
HAS_DOCKER_PY = True
HAS_DOCKER_ERROR = False
try:
from docker.errors import APIError, TLSParameterError
from docker.tls import TLSConfig
from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
# Client has recently been split into DockerClient and APIClient
try:
from docker import Client
except ImportError as dummy:
try:
from docker import APIClient as Client
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
class Client:
pass
DEFAULT_DOCKER_CONFIG_FILE = os.path.splitext(os.path.basename(__file__))[0] + '.yml'
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = "localhost"
DEFAULT_IP = '127.0.0.1'
DEFAULT_SSH_PORT = '22'
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
DOCKER_ENV_ARGS = dict(
config_file='DOCKER_CONFIG_FILE',
docker_host='DOCKER_HOST',
api_version='DOCKER_API_VERSION',
cert_path='DOCKER_CERT_PATH',
ssl_version='DOCKER_SSL_VERSION',
tls='DOCKER_TLS',
tls_verify='DOCKER_TLS_VERIFY',
tls_hostname='DOCKER_TLS_HOSTNAME',
timeout='DOCKER_TIMEOUT',
private_ssh_port='DOCKER_PRIVATE_SSH_PORT',
default_ip='DOCKER_DEFAULT_IP',
)
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
def log(msg, pretty_print=False):
if pretty_print:
print(json.dumps(msg, sort_keys=True, indent=2))
else:
print(msg + u'\n')
class AnsibleDockerClient(Client):
def __init__(self, auth_params, debug):
self.auth_params = auth_params
self.debug = debug
self._connect_params = self._get_connect_params()
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
def fail(self, msg):
fail(msg)
def log(self, msg, pretty_print=False):
if self.debug:
log(msg, pretty_print)
def _get_tls_config(self, **kwargs):
self.log("get_tls_config:")
for key in kwargs:
self.log(" %s: %s" % (key, kwargs[key]))
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
self.fail("TLS config error: %s" % exc)
def _get_connect_params(self):
auth = self.auth_params
self.log("auth params:")
for key in auth:
self.log(" %s: %s" % (key, auth[key]))
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
# TLS with no certs and no host verification
tls_config = self._get_tls_config(verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
else:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = self._get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
"Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
"You may also use TLS without verification by setting the tls parameter to true." \
% (self.auth_params['tls_hostname'], match.group(1), match.group(1))
self.fail(msg)
self.fail("SSL Exception: %s" % (error))
class EnvArgs(object):
def __init__(self):
self.config_file = None
self.docker_host = None
self.api_version = None
self.cert_path = None
self.ssl_version = None
self.tls = None
self.tls_verify = None
self.tls_hostname = None
self.timeout = None
self.private_ssh_port = None
self.default_ip = None
class DockerInventory(object):
def __init__(self):
self._args = self._parse_cli_args()
self._env_args = self._parse_env_args()
self.groups = defaultdict(list)
self.hostvars = defaultdict(dict)
def run(self):
config_from_file = self._parse_config_file()
if not config_from_file:
config_from_file = dict()
docker_hosts = self.get_hosts(config_from_file)
for host in docker_hosts:
client = AnsibleDockerClient(host, self._args.debug)
self.get_inventory(client, host)
if not self._args.host:
self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
self.groups['_meta'] = dict(
hostvars=self.hostvars
)
print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
else:
print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))
sys.exit(0)
def get_inventory(self, client, host):
ssh_port = host.get('default_ssh_port')
default_ip = host.get('default_ip')
hostname = host.get('docker_host')
try:
containers = client.containers(all=True)
except Exception as exc:
self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))
for container in containers:
id = container.get('Id')
short_id = id[:13]
try:
name = container.get('Names', list()).pop(0).lstrip('/')
except IndexError:
name = short_id
if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
try:
inspect = client.inspect_container(id)
except Exception as exc:
self.fail("Error inspecting container %s - %s" % (name, str(exc)))
running = inspect.get('State', dict()).get('Running')
# Add container to groups
image_name = inspect.get('Config', dict()).get('Image')
if image_name:
self.groups["image_%s" % (image_name)].append(name)
stack_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.stack.namespace')
if stack_name:
self.groups["stack_%s" % stack_name].append(name)
service_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.swarm.service.name')
if service_name:
self.groups["service_%s" % service_name].append(name)
self.groups[id].append(name)
self.groups[name].append(name)
if short_id not in self.groups:
self.groups[short_id].append(name)
self.groups[hostname].append(name)
if running is True:
self.groups['running'].append(name)
else:
self.groups['stopped'].append(name)
# Figure out ssh IP and port
try:
# Look up the public-facing port NAT'ed to the ssh port.
port = client.port(container, ssh_port)[0]
except (IndexError, AttributeError, TypeError):
port = dict()
try:
ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
except KeyError:
ip = ''
facts = dict(
ansible_ssh_host=ip,
ansible_ssh_port=port.get('HostPort', int()),
docker_name=name,
docker_short_id=short_id
)
for key in inspect:
fact_key = self._slugify(key)
facts[fact_key] = inspect.get(key)
self.hostvars[name].update(facts)
def _slugify(self, value):
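# e.g. _slugify('HostConfig') -> 'docker_hostconfig'; each key returned by
# inspect_container() becomes one of the docker_* facts shown in the docstring.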
return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def get_hosts(self, config):
'''
Determine the list of docker hosts we need to talk to.
:param config: dictionary read from config file. Can be empty.
:return: list of connection dictionaries
'''
hosts = list()
hosts_list = config.get('hosts')
defaults = config.get('defaults', dict())
self.log('defaults:')
self.log(defaults, pretty_print=True)
def_host = defaults.get('host')
def_tls = defaults.get('tls')
def_tls_verify = defaults.get('tls_verify')
def_tls_hostname = defaults.get('tls_hostname')
def_ssl_version = defaults.get('ssl_version')
def_cert_path = defaults.get('cert_path')
def_cacert_path = defaults.get('cacert_path')
def_key_path = defaults.get('key_path')
def_version = defaults.get('version')
def_timeout = defaults.get('timeout')
def_ip = defaults.get('default_ip')
def_ssh_port = defaults.get('private_ssh_port')
if hosts_list:
# use hosts from config file
for host in hosts_list:
docker_host = host.get('host') or def_host or self._args.docker_host or \
self._env_args.docker_host or DEFAULT_DOCKER_HOST
api_version = host.get('version') or def_version or self._args.api_version or \
self._env_args.api_version or DEFAULT_DOCKER_API_VERSION
tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \
self._env_args.tls_hostname or DEFAULT_TLS_HOSTNAME
tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \
self._env_args.tls_verify or DEFAULT_TLS_VERIFY
tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \
self._env_args.ssl_version
cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \
self._env_args.cert_path
if cert_path and cert_path == self._env_args.cert_path:
cert_path = os.path.join(cert_path, 'cert.pem')
cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \
self._env_args.cert_path
if cacert_path and cacert_path == self._env_args.cert_path:
cacert_path = os.path.join(cacert_path, 'ca.pem')
key_path = host.get('key_path') or def_key_path or self._args.key_path or \
self._env_args.cert_path
if key_path and key_path == self._env_args.cert_path:
key_path = os.path.join(key_path, 'key.pem')
timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \
DEFAULT_TIMEOUT_SECONDS
default_ip = host.get('default_ip') or def_ip or self._env_args.default_ip or \
self._args.default_ip_address or DEFAULT_IP
default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \
DEFAULT_SSH_PORT
host_dict = dict(
docker_host=docker_host,
api_version=api_version,
tls=tls,
tls_verify=tls_verify,
tls_hostname=tls_hostname,
cert_path=cert_path,
cacert_path=cacert_path,
key_path=key_path,
ssl_version=ssl_version,
timeout=timeout,
default_ip=default_ip,
default_ssh_port=default_ssh_port,
)
hosts.append(host_dict)
else:
# use default definition
docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST
api_version = def_version or self._args.api_version or self._env_args.api_version or \
DEFAULT_DOCKER_API_VERSION
tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname or \
DEFAULT_TLS_HOSTNAME
tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY
tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version
cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path
if cert_path and cert_path == self._env_args.cert_path:
cert_path = os.path.join(cert_path, 'cert.pem')
cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path
if cacert_path and cacert_path == self._env_args.cert_path:
cacert_path = os.path.join(cacert_path, 'ca.pem')
key_path = def_key_path or self._args.key_path or self._env_args.cert_path
if key_path and key_path == self._env_args.cert_path:
key_path = os.path.join(key_path, 'key.pem')
timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS
default_ip = def_ip or self._env_args.default_ip or self._args.default_ip_address or DEFAULT_IP
default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT
host_dict = dict(
docker_host=docker_host,
api_version=api_version,
tls=tls,
tls_verify=tls_verify,
tls_hostname=tls_hostname,
cert_path=cert_path,
cacert_path=cacert_path,
key_path=key_path,
ssl_version=ssl_version,
timeout=timeout,
default_ip=default_ip,
default_ssh_port=default_ssh_port,
)
hosts.append(host_dict)
self.log("hosts: ")
self.log(hosts, pretty_print=True)
return hosts
def _parse_config_file(self):
config = dict()
config_file = DEFAULT_DOCKER_CONFIG_FILE
if self._args.config_file:
config_file = self._args.config_file
elif self._env_args.config_file:
config_file = self._env_args.config_file
config_file = os.path.abspath(config_file)
if os.path.isfile(config_file):
with open(config_file) as f:
try:
config = yaml.safe_load(f.read())
except Exception as exc:
self.fail("Error: parsing %s - %s" % (config_file, str(exc)))
else:
msg = "Error: config file given by {} does not exist - " + config_file
if self._args.config_file:
self.fail(msg.format('command line argument'))
elif self._env_args.config_file:
self.fail(msg.format(DOCKER_ENV_ARGS.get('config_file')))
else:
self.log(msg.format('DEFAULT_DOCKER_CONFIG_FILE'))
return config
def log(self, msg, pretty_print=False):
if self._args.debug:
log(msg, pretty_print)
def fail(self, msg):
fail(msg)
def _parse_env_args(self):
args = EnvArgs()
for key, value in DOCKER_ENV_ARGS.items():
if os.environ.get(value):
val = os.environ.get(value)
if val in BOOLEANS_TRUE:
val = True
if val in BOOLEANS_FALSE:
val = False
setattr(args, key, val)
return args
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Return Ansible inventory for one or more Docker hosts.')
parser.add_argument('--list', action='store_true', default=True,
help='List all containers (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Only get information for a specific container.')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output(default: False)')
parser.add_argument('--config-file', action='store', default=None,
help="Name of the config file to use. Default is %s" % (DEFAULT_DOCKER_CONFIG_FILE))
parser.add_argument('--docker-host', action='store', default=None,
help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s"
% (DEFAULT_DOCKER_HOST))
parser.add_argument('--tls-hostname', action='store', default=None,
help="Host name to expect in TLS certs. Defaults to %s" % DEFAULT_TLS_HOSTNAME)
parser.add_argument('--api-version', action='store', default=None,
help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
parser.add_argument('--timeout', action='store', default=None,
help="Docker connection timeout in seconds. Defaults to %s"
% (DEFAULT_TIMEOUT_SECONDS))
parser.add_argument('--cacert-path', action='store', default=None,
help="Path to the TLS certificate authority pem file.")
parser.add_argument('--cert-path', action='store', default=None,
help="Path to the TLS certificate pem file.")
parser.add_argument('--key-path', action='store', default=None,
help="Path to the TLS encryption key pem file.")
parser.add_argument('--ssl-version', action='store', default=None,
help="TLS version number")
parser.add_argument('--tls', action='store_true', default=None,
help="Use TLS. Defaults to %s" % (DEFAULT_TLS))
parser.add_argument('--tls-verify', action='store_true', default=None,
help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY))
parser.add_argument('--private-ssh-port', action='store', default=None,
help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT))
parser.add_argument('--default-ip-address', action='store', default=None,
help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP))
return parser.parse_args()
def _json_format_dict(self, data, pretty_print=False):
# format inventory data for output
if pretty_print:
return json.dumps(data, sort_keys=True, indent=4)
else:
return json.dumps(data)
def main():
if not HAS_DOCKER_PY:
fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR))
DockerInventory().run()
main()

@ -1,74 +0,0 @@
# This is the configuration file for the Docker inventory script: docker_inventory.py.
#
# You can define the following in this file:
#
# defaults
# Defines a default connection. Defaults will be taken from this and applied to any values not provided
# for a host defined in the hosts list.
#
# hosts
# If you wish to get inventory from more than one Docker host, define a hosts list.
#
# For the default host and each host in the hosts list define the following attributes:
#
# host:
# description: The URL or Unix socket path used to connect to the Docker API.
# required: yes
#
# tls:
# description: Connect using TLS without verifying the authenticity of the Docker host server.
# default: false
# required: false
#
# tls_verify:
# description: Connect using TLS and verify the authenticity of the Docker host server.
# default: false
# required: false
#
# cert_path:
# description: Path to the client's TLS certificate file.
# default: null
# required: false
#
# cacert_path:
# description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
# default: null
# required: false
#
# key_path:
# description: Path to the client's TLS key file.
# default: null
# required: false
#
# version:
# description: The Docker API version.
# required: false
# default: will be supplied by the docker-py module.
#
# timeout:
# description: The amount of time in seconds to wait on an API response.
# required: false
# default: 60
#
# default_ip:
# description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
# '0.0.0.0'.
# required: false
# default: 127.0.0.1
#
# private_ssh_port:
# description: The port containers use for SSH.
# required: false
# default: 22
#defaults:
# host: unix:///var/run/docker.sock
# private_ssh_port: 22
# default_ip: 127.0.0.1
#hosts:
# - host: tcp://10.45.5.16:4243
# private_ssh_port: 2022
# default_ip: 172.16.3.45
# - host: tcp://localhost:4243
# private_ssh_port: 2029

@ -1,107 +0,0 @@
#!/usr/bin/env python
"""
fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and
returns them under the host group 'coreos'
"""
# Copyright (C) 2014 Andrew Rothstein <andrew.rothstein at gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the vagrant.py inventory script for giving me the basic structure
# of this.
#
import sys
import subprocess
import re
import string
from optparse import OptionParser
import json
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers in your fleet")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
def get_ssh_config():
configs = []
for box in list_running_boxes():
config = get_a_ssh_config(box)
configs.append(config)
return configs
# list all the running instances in the fleet
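# Assumed `fleetctl list-machines` output format (illustrative):
#
#   MACHINE     IP         METADATA
#   148a1...    10.0.0.1   -
#
# The regex below captures the IP column; the header row is skipped by the
# group(1) != "IP" check.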
def list_running_boxes():
boxes = []
for line in subprocess.check_output(["fleetctl", "list-machines"]).decode('utf-8').split('\n'):
matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line)
if matcher and matcher.group(1) != "IP":
boxes.append(matcher.group(1))
return boxes
def get_a_ssh_config(box_name):
config = {}
config['Host'] = box_name
config['ansible_ssh_user'] = 'core'
config['ansible_python_interpreter'] = '/opt/bin/python'
return config
# List out servers that fleet has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
hosts = {'coreos': []}
for data in ssh_config:
hosts['coreos'].append(data['Host'])
print(json.dumps(hosts))
sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
result = {}
ssh_config = get_ssh_config()
details = list(filter(lambda x: (x['Host'] == options.host), ssh_config))
if len(details) > 0:
# pass through the port, in case it's non standard.
result = details[0]
print(json.dumps(result))
sys.exit(0)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(1)

@ -1,200 +0,0 @@
# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory)
#
# This script can be used as an Ansible dynamic inventory.
# The connection parameters are set up via *foreman.ini*
# This is how the script finds the configuration file, in
# order of discovery:
#
# * `/etc/ansible/foreman.ini`
# * Current directory of your inventory script.
# * `FOREMAN_INI_PATH` environment variable.
#
# ## Variables and Parameters
#
# The data returned from Foreman for each host is stored in a foreman
# hash, so it is available as *host_vars* along with the parameters
# of the host and its hostgroups:
#
# "foo.example.com": {
# "foreman": {
# "architecture_id": 1,
# "architecture_name": "x86_64",
# "build": false,
# "build_status": 0,
# "build_status_label": "Installed",
# "capabilities": [
# "build",
# "image"
# ],
# "compute_profile_id": 4,
# "hostgroup_name": "webtier/myapp",
# "id": 70,
# "image_name": "debian8.1",
# ...
# "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77"
# },
# "foreman_params": {
# "testparam1": "foobar",
# "testparam2": "small",
# ...
# }
#
# and could therefore be used in Ansible like:
#
# - debug: msg="From Foreman host {{ foreman['uuid'] }}"
#
# Which yields
#
# TASK [test_foreman : debug] ****************************************************
# ok: [foo.example.com] => {
# "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf"
# }
#
# ## Automatic Ansible groups
#
# The inventory will provide a set of groups, by default prefixed by
# 'foreman_'. If you want to customize this prefix, change the
# group_prefix option in /etc/ansible/foreman.ini. The rest of this
# guide will assume the default prefix of 'foreman'
#
# The hostgroup, location, organization, content view, and lifecycle
# environment of each host are created as Ansible groups with a
# foreman_<grouptype> prefix, all lowercase and problematic parameters
# removed. So e.g. the foreman hostgroup
#
# myapp / webtier / datacenter1
#
# would turn into the Ansible group:
#
# foreman_hostgroup_myapp_webtier_datacenter1
#
# If the parameter want_hostcollections is set to true, the
# collections each host is in are created as Ansible groups with a
# foreman_hostcollection prefix, all lowercase and problematic
# parameters removed. So e.g. the Foreman host collection
#
# Patch Window Thursday
#
# would turn into the Ansible group:
#
# foreman_hostcollection_patchwindowthursday
#
# If the parameter host_filters is set, it will be used as the
# "search" parameter for the /api/v2/hosts call. This can be used to
# restrict the list of returned hosts, as shown below.
#
# Furthermore, Ansible groups can be created on the fly using the
# *group_patterns* variable in *foreman.ini* so that you can build up
# hierarchies using parameters on the hostgroup and host variables.
#
# Let's assume you have a host that is built using this nested hostgroup:
#
# myapp / webtier / datacenter1
#
# and each of the hostgroups defines a parameter, respectively:
#
# myapp: app_param = myapp
# webtier: tier_param = webtier
# datacenter1: dc_param = datacenter1
#
# If the host is also in a subnet called "mysubnet" and provisioned via an image,
# then *group_patterns* like:
#
# [ansible]
# group_patterns = ["{app_param}-{tier_param}-{dc_param}",
# "{app_param}-{tier_param}",
# "{app_param}",
# "{subnet_name}-{provision_method}"]
#
# would put the host into the additional Ansible groups:
#
# - myapp-webtier-datacenter1
# - myapp-webtier
# - myapp
# - mysubnet-image
#
# by recursively resolving the hostgroups, getting the parameter keys
# and values and doing a Python *string.format()* like replacement on
# it.
#
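# A minimal sketch (not the inventory script itself) of how group_patterns
# are resolved, assuming params is the merged parameter dict of one host:
#
#   group_patterns = ["{app_param}-{tier_param}", "{app_param}"]
#   params = {"app_param": "myapp", "tier_param": "webtier"}
#   for pattern in group_patterns:
#       try:
#           print(pattern.format(**params))  # myapp-webtier, then myapp
#       except KeyError:
#           pass  # a parameter used by the pattern is missing; skip the group
#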
[foreman]
url = http://localhost:3000/
user = foreman
password = secret
ssl_verify = True
# Foreman 1.24 introduces a new reports API to improve performance of the inventory script.
# Note: This requires foreman_ansible plugin installed.
# Set to False if you want to use the old API. Defaults to True.
use_reports_api = True
# Retrieve only hosts from the organization "Web Engineering".
# host_filters = organization="Web Engineering"
# Retrieve only hosts from the organization "Web Engineering" that are
# also in the host collection "Apache Servers".
# host_filters = organization="Web Engineering" and host_collection="Apache Servers"
# Foreman Inventory report related configuration options.
# Configs that default to True :
# want_organization , want_location, want_ipv4, want_host_group, want_subnet, want_smart_proxies, want_facts
# Configs that default to False :
# want_ipv6, want_subnet_v6, want_content_facet_attributes, want_host_params
[report]
# want_organization = True
# want_location = True
# want_ipv4 = True
# want_ipv6 = False
# want_host_group = True
# want_subnet = True
# want_subnet_v6 = False
# want_smart_proxies = True
# want_content_facet_attributes = False
# want_host_params = False
# use this config to determine if facts are to be fetched in the report and stored on the hosts.
# want_facts = False
# Upon receiving a request to return inventory report, Foreman schedules a report generation job.
# The script then polls the report_data endpoint repeatedly to check if the job is complete, then retrieves the data.
# poll_interval allows you to define the polling interval between two calls to the report_data endpoint while polling.
# Defaults to 10 seconds
# poll_interval = 10
[ansible]
group_patterns = ["{app}-{tier}-{color}",
"{app}-{color}",
"{app}",
"{tier}"]
group_prefix = foreman_
# Whether to fetch facts from Foreman and store them on the host
want_facts = True
# Whether to create Ansible groups for host collections. Only tested
# with Katello (Red Hat Satellite). Disabled by default to not break
# the script for stand-alone Foreman.
want_hostcollections = False
# Whether to interpret global parameter values as JSON (if possible, else
# take them as is). Only tested with Katello (Red Hat Satellite).
# This allows you to define list and dictionary (and more complicated) variables
# by entering them as JSON strings in Foreman parameters.
# Disabled by default as the change would otherwise not be backward compatible.
rich_params = False
# Whether to populate the ansible_ssh_host variable to explicitly specify the
# connection target. Only tested with Katello (Red Hat Satellite).
# If the foreman 'ip' fact exists, then the ansible_ssh_host variable is populated
# to permit connections where DNS resolution fails.
want_ansible_ssh_host = False
[cache]
path = .
max_age = 60
# Whether to scan foreman to add recently created hosts in inventory cache
scan_new_hosts = True

@ -1,662 +0,0 @@
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>,
# Daniel Lobato Garcia <dlobatog@redhat.com>
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with it. If not, see <http://www.gnu.org/licenses/>.
#
# This is somewhat based on cobbler inventory
# Stdlib imports
# __future__ imports must occur at the beginning of file
from __future__ import print_function
import json
import argparse
import copy
import os
import re
import sys
from time import time, sleep
from collections import defaultdict
from distutils.version import LooseVersion, StrictVersion
# 3rd party imports
import requests
if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
print('This script requires python-requests 1.1 as a minimum version')
sys.exit(1)
from requests.auth import HTTPBasicAuth
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import configparser as ConfigParser
def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string"""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
class ForemanInventory(object):
def __init__(self):
self.inventory = defaultdict(list) # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
self.params = dict() # Params of each host
self.facts = dict() # Facts of each host
self.hostgroups = dict() # host groups
self.hostcollections = dict() # host collections
self.session = None # Requests session
self.config_paths = [
"/etc/ansible/foreman.ini",
os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini',
]
env_value = os.environ.get('FOREMAN_INI_PATH')
if env_value is not None:
self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
def read_settings(self):
"""Reads the settings from the foreman.ini file"""
config = ConfigParser.SafeConfigParser()
config.read(self.config_paths)
# Foreman API related
try:
self.foreman_url = config.get('foreman', 'url')
self.foreman_user = config.get('foreman', 'user')
self.foreman_pw = config.get('foreman', 'password', raw=True)
self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
print("Error parsing configuration: %s" % e, file=sys.stderr)
return False
# Inventory Report Related
try:
self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.foreman_use_reports_api = True
try:
self.want_organization = config.getboolean('report', 'want_organization')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_organization = True
try:
self.want_location = config.getboolean('report', 'want_location')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_location = True
try:
self.want_IPv4 = config.getboolean('report', 'want_ipv4')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_IPv4 = True
try:
self.want_IPv6 = config.getboolean('report', 'want_ipv6')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_IPv6 = False
try:
self.want_host_group = config.getboolean('report', 'want_host_group')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_host_group = True
try:
self.want_host_params = config.getboolean('report', 'want_host_params')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_host_params = False
try:
self.want_subnet = config.getboolean('report', 'want_subnet')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_subnet = True
try:
self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_subnet_v6 = False
try:
self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_smart_proxies = True
try:
self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_content_facet_attributes = False
try:
self.report_want_facts = config.getboolean('report', 'want_facts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.report_want_facts = True
try:
self.poll_interval = config.getint('report', 'poll_interval')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.poll_interval = 10
# Ansible related
try:
group_patterns = config.get('ansible', 'group_patterns')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
group_patterns = "[]"
self.group_patterns = json.loads(group_patterns)
try:
self.group_prefix = config.get('ansible', 'group_prefix')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.group_prefix = "foreman_"
try:
self.want_facts = config.getboolean('ansible', 'want_facts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_facts = True
self.want_facts = self.want_facts and self.report_want_facts
try:
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_hostcollections = False
try:
self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_ansible_ssh_host = False
# Do we want parameters to be interpreted if possible as JSON? (no by default)
try:
self.rich_params = config.getboolean('ansible', 'rich_params')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.rich_params = False
try:
self.host_filters = config.get('foreman', 'host_filters')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.host_filters = None
# Cache related
try:
cache_path = os.path.expanduser(config.get('cache', 'path'))
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
cache_path = '.'
(script, ext) = os.path.splitext(os.path.basename(__file__))
self.cache_path_cache = cache_path + "/%s.cache" % script
self.cache_path_inventory = cache_path + "/%s.index" % script
self.cache_path_params = cache_path + "/%s.params" % script
self.cache_path_facts = cache_path + "/%s.facts" % script
self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
try:
self.cache_max_age = config.getint('cache', 'max_age')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.cache_max_age = 60
try:
self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.scan_new_hosts = False
return True
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
self.args = parser.parse_args()
def _get_session(self):
if not self.session:
self.session = requests.session()
self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw)
self.session.verify = self.foreman_ssl_verify
return self.session
def _get_json(self, url, ignore_errors=None, params=None):
if params is None:
params = {}
params['per_page'] = 250
page = 1
results = []
s = self._get_session()
while True:
params['page'] = page
ret = s.get(url, params=params)
if ignore_errors and ret.status_code in ignore_errors:
break
ret.raise_for_status()
json = ret.json()
# /hosts/:id has no results key
if 'results' not in json:
return json
# Facts are returned as a dict in results, not a list
if isinstance(json['results'], dict):
return json['results']
# The list of all hosts is returned paginated
results = results + json['results']
if len(results) >= json['subtotal']:
break
page += 1
if len(json['results']) == 0:
print("Did not make any progress during loop. "
"expected %d got %d" % (json['total'], len(results)),
file=sys.stderr)
break
return results
def _use_inventory_report(self):
if not self.foreman_use_reports_api:
return False
status_url = "%s/api/v2/status" % self.foreman_url
result = self._get_json(status_url)
foreman_version = (LooseVersion(result.get('version')) >= LooseVersion('1.24.0'))
return foreman_version
def _fetch_params(self):
options, params = ("no", "yes"), dict()
params["Organization"] = options[self.want_organization]
params["Location"] = options[self.want_location]
params["IPv4"] = options[self.want_IPv4]
params["IPv6"] = options[self.want_IPv6]
params["Facts"] = options[self.want_facts]
params["Host Group"] = options[self.want_host_group]
params["Host Collections"] = options[self.want_hostcollections]
params["Subnet"] = options[self.want_subnet]
params["Subnet v6"] = options[self.want_subnet_v6]
params["Smart Proxies"] = options[self.want_smart_proxies]
params["Content Attributes"] = options[self.want_content_facet_attributes]
params["Host Parameters"] = options[self.want_host_params]
if self.host_filters:
params["Hosts"] = self.host_filters
return params
def _post_request(self):
url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url
session = self._get_session()
params = {'input_values': self._fetch_params()}
ret = session.post(url, json=params)
if not ret:
raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!")
url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url'))
response = session.get(url)
while response:
if response.status_code != 204:
break
else:
sleep(self.poll_interval)
response = session.get(url)
if not response:
raise Exception("Error receiving inventory report from foreman. Please check foreman logs!")
else:
return response.json()
def _get_hosts(self):
url = "%s/api/v2/hosts" % self.foreman_url
params = {}
if self.host_filters:
params['search'] = self.host_filters
return self._get_json(url, params=params)
def _get_host_data_by_id(self, hid):
url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
return self._get_json(url)
def _get_facts_by_id(self, hid):
url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
return self._get_json(url)
def _resolve_params(self, host_params):
"""Convert host params to dict"""
params = {}
for param in host_params:
name = param['name']
if self.rich_params:
try:
params[name] = json.loads(param['value'])
except ValueError:
params[name] = param['value']
else:
params[name] = param['value']
return params
def _get_facts(self, host):
"""Fetch all host facts of the host"""
if not self.want_facts:
return {}
ret = self._get_facts_by_id(host['id'])
if len(ret.values()) == 0:
facts = {}
elif len(ret.values()) == 1:
facts = list(ret.values())[0]
else:
raise ValueError("More than one set of facts returned for '%s'" % host)
return facts
def write_to_cache(self, data, filename):
"""Write data in JSON format to a file"""
json_data = json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def _write_cache(self):
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
self.write_to_cache(self.params, self.cache_path_params)
self.write_to_cache(self.facts, self.cache_path_facts)
self.write_to_cache(self.hostcollections, self.cache_path_hostcollections)
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores
so they can be used as Ansible groups
>>> ForemanInventory.to_safe("foo-bar baz")
'foo_barbaz'
'''
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))
def update_cache(self, scan_only_new_hosts=False):
"""Make calls to foreman and save the output in a cache"""
use_inventory_report = self._use_inventory_report()
if use_inventory_report:
self._update_cache_inventory(scan_only_new_hosts)
else:
self._update_cache_host_api(scan_only_new_hosts)
def _update_cache_inventory(self, scan_only_new_hosts):
self.groups = dict()
self.hosts = dict()
try:
inventory_report_response = self._post_request()
except Exception:
self._update_cache_host_api(scan_only_new_hosts)
return
host_data = json.loads(inventory_report_response)
for host in host_data:
if not host or (host["name"] in self.cache.keys() and scan_only_new_hosts):
continue
dns_name = host['name']
host_params = host.pop('host_parameters', {})
fact_list = host.pop('facts', {})
content_facet_attributes = host.get('content_attributes', {}) or {}
# Create ansible groups for hostgroup
group = 'host_group'
val = host.get(group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
# Create ansible groups for environment, location and organization
for group in ['environment', 'location', 'organization']:
val = host.get('%s' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
for group in ['lifecycle_environment', 'content_view']:
val = content_facet_attributes.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
params = host_params
# Ansible groups by parameters in host groups and Foreman host
# attributes.
groupby = dict()
for k, v in params.items():
groupby[k] = self.to_safe(to_text(v))
# The name of the ansible groups is given by group_patterns:
for pattern in self.group_patterns:
try:
key = pattern.format(**groupby)
self.inventory[key].append(dns_name)
except KeyError:
pass # Host not part of this group
if self.want_hostcollections:
hostcollections = host.get('host_collections')
if hostcollections:
# Create Ansible groups for host collections
for hostcollection in hostcollections:
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower()))
self.inventory[safe_key].append(dns_name)
self.hostcollections[dns_name] = hostcollections
self.cache[dns_name] = host
self.params[dns_name] = params
self.facts[dns_name] = fact_list
self.inventory['all'].append(dns_name)
self._write_cache()
def _update_cache_host_api(self, scan_only_new_hosts):
"""Make calls to foreman and save the output in a cache"""
self.groups = dict()
self.hosts = dict()
for host in self._get_hosts():
if host['name'] in self.cache.keys() and scan_only_new_hosts:
continue
dns_name = host['name']
host_data = self._get_host_data_by_id(host['id'])
host_params = host_data.get('all_parameters', {})
# Create ansible groups for hostgroup
group = 'hostgroup'
val = host.get('%s_title' % group) or host.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
# Create ansible groups for environment, location and organization
for group in ['environment', 'location', 'organization']:
val = host.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
for group in ['lifecycle_environment', 'content_view']:
val = host.get('content_facet_attributes', {}).get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
params = self._resolve_params(host_params)
# Ansible groups by parameters in host groups and Foreman host
# attributes.
groupby = dict()
for k, v in params.items():
groupby[k] = self.to_safe(to_text(v))
# The name of the ansible groups is given by group_patterns:
for pattern in self.group_patterns:
try:
key = pattern.format(**groupby)
self.inventory[key].append(dns_name)
except KeyError:
pass # Host not part of this group
if self.want_hostcollections:
hostcollections = host_data.get('host_collections')
if hostcollections:
# Create Ansible groups for host collections
for hostcollection in hostcollections:
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower()))
self.inventory[safe_key].append(dns_name)
self.hostcollections[dns_name] = hostcollections
self.cache[dns_name] = host
self.params[dns_name] = params
self.facts[dns_name] = self._get_facts(host)
self.inventory['all'].append(dns_name)
self._write_cache()
def is_cache_valid(self):
"""Determines if the cache is still valid"""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if (os.path.isfile(self.cache_path_inventory) and
os.path.isfile(self.cache_path_params) and
os.path.isfile(self.cache_path_facts)):
return True
return False
def load_inventory_from_cache(self):
"""Read the index from the cache file sets self.index"""
with open(self.cache_path_inventory, 'r') as fp:
self.inventory = json.load(fp)
def load_params_from_cache(self):
"""Read the index from the cache file sets self.index"""
with open(self.cache_path_params, 'r') as fp:
self.params = json.load(fp)
def load_facts_from_cache(self):
"""Read the index from the cache file sets self.facts"""
if not self.want_facts:
return
with open(self.cache_path_facts, 'r') as fp:
self.facts = json.load(fp)
def load_hostcollections_from_cache(self):
"""Read the index from the cache file sets self.hostcollections"""
if not self.want_hostcollections:
return
with open(self.cache_path_hostcollections, 'r') as fp:
self.hostcollections = json.load(fp)
def load_cache_from_cache(self):
"""Read the cache from the cache file sets self.cache"""
with open(self.cache_path_cache, 'r') as fp:
self.cache = json.load(fp)
def get_inventory(self):
if self.args.refresh_cache or not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_params_from_cache()
self.load_facts_from_cache()
self.load_hostcollections_from_cache()
self.load_cache_from_cache()
if self.scan_new_hosts:
self.update_cache(True)
def get_host_info(self):
"""Get variables about a specific host"""
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return json_format_dict({}, True)
return json_format_dict(self.cache[self.args.host], True)
def _print_data(self):
data_to_print = ""
if self.args.host:
data_to_print += self.get_host_info()
else:
self.inventory['_meta'] = {'hostvars': {}}
for hostname in self.cache:
self.inventory['_meta']['hostvars'][hostname] = {
'foreman': self.cache[hostname],
'foreman_params': self.params[hostname],
}
if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]:
self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip']
if self.want_facts:
self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname]
data_to_print += json_format_dict(self.inventory, True)
print(data_to_print)
def run(self):
# Read settings and parse CLI arguments
if not self.read_settings():
return False
self.parse_cli_args()
self.get_inventory()
self._print_data()
return True
if __name__ == '__main__':
sys.exit(not ForemanInventory().run())

@ -1,123 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import argparse
from distutils.version import LooseVersion
import json
import os
import sys
from ipalib import api, errors, __version__ as IPA_VERSION
from ansible.module_utils.six import u
def initialize():
'''
This function initializes the FreeIPA/IPA API. This function requires
no arguments. A kerberos key must be present in the user's keyring in
order for this to work. The default IPA configuration directory is /etc/ipa;
this path can be overridden with the IPA_CONFDIR environment variable.
'''
api.bootstrap(context='cli')
if not os.path.isdir(api.env.confdir):
print("WARNING: IPA configuration directory (%s) is missing. "
"Environment variable IPA_CONFDIR could be used to override "
"default path." % api.env.confdir)
if LooseVersion(IPA_VERSION) >= LooseVersion('4.6.2'):
# With ipalib < 4.6.0, 'server' and 'domain' have default values
# ('localhost:8888', 'example.com'); newer versions don't, and when
# DNS autodiscovery is broken, one of jsonrpc_uri / xmlrpc_uri is
# required.
# ipalib 4.6.0 is unusable (https://pagure.io/freeipa/issue/7132)
# that's why 4.6.2 is explicitly tested.
if 'server' not in api.env or 'domain' not in api.env:
sys.exit("ERROR: ('jsonrpc_uri' or 'xmlrpc_uri') or 'domain' are not "
"defined in '[global]' section of '%s' nor in '%s'." %
(api.env.conf, api.env.conf_default))
api.finalize()
try:
api.Backend.rpcclient.connect()
except AttributeError:
# FreeIPA < 4.0 compatibility
api.Backend.xmlclient.connect()
return api
def list_groups(api):
'''
This function prints a list of all host groups. This function requires
one argument, the FreeIPA/IPA API object.
'''
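# The printed inventory looks like (illustrative values):
# {
#  "webservers": {"hosts": ["web1.example.com"]},
#  "_meta": {"hostvars": {"web1.example.com": {}}}
# }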
inventory = {}
hostvars = {}
result = api.Command.hostgroup_find(all=True)['result']
for hostgroup in result:
# Get direct and indirect members (nested hostgroups) of hostgroup
members = []
if 'member_host' in hostgroup:
members = [host for host in hostgroup['member_host']]
if 'memberindirect_host' in hostgroup:
members += (host for host in hostgroup['memberindirect_host'])
inventory[hostgroup['cn'][0]] = {'hosts': [host for host in members]}
for member in members:
hostvars[member] = {}
inventory['_meta'] = {'hostvars': hostvars}
inv_string = json.dumps(inventory, indent=1, sort_keys=True)
print(inv_string)
return None
def parse_args():
'''
This function parses the arguments that were passed in via the command line.
This function expects no arguments.
'''
parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA '
'inventory module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specified host')
return parser.parse_args()
def get_host_attributes(api, host):
"""
This function expects the FreeIPA/IPA API object and a hostname string
to look up variables for.
Args:
api: FreeIPA API Object
host: Hostname to look up
Returns: JSON string of host vars if found, else an empty dict
"""
try:
result = api.Command.host_show(u(host))['result']
if 'usercertificate' in result:
del result['usercertificate']
return json.dumps(result, indent=1)
except errors.NotFound:
return {}
if __name__ == '__main__':
args = parse_args()
api = initialize()
if args.host:
print(get_host_attributes(api, args.host))
elif args.list:
list_groups(api)
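# A minimal sketch of what list_groups() prints for one hostgroup named
# 'webservers' with a single member (all names hypothetical):
#
#   {
#    "_meta": {"hostvars": {"web01.example.com": {}}},
#    "webservers": {"hosts": ["web01.example.com"]}
#   }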

@ -1,76 +0,0 @@
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# The GCE inventory script has the following dependencies:
# 1. A valid Google Cloud Platform account with Google Compute Engine
# enabled. See https://cloud.google.com
# 2. An OAuth2 Service Account flow should be enabled. This will generate
# a private key file that the inventory script will use for API request
# authorization. See https://developers.google.com/accounts/docs/OAuth2
# 3. Convert the private key from PKCS12 to PEM format
# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
# > -nodes -nocerts | openssl rsa -out pkey.pem
# 4. The libcloud (>=0.13.3) Python library. See http://libcloud.apache.org
#
# (See ansible/test/gce_tests.py comments for full install instructions)
#
# Author: Eric Johnson <erjohnso@google.com>
# Contributors: John Roach <johnroach1985@gmail.com>
[gce]
# GCE Service Account configuration information can be stored in the
# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
# exist in your PYTHONPATH and be picked up automatically with an import
# statement in the inventory script. However, you can specify an absolute
# path to the secrets.py file with the 'libcloud_secrets' parameter.
# This option will be deprecated in a future release.
libcloud_secrets =
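# For example, a minimal secrets.py (all values are placeholders) defines the
# two names the inventory script imports, GCE_PARAMS and GCE_KEYWORD_PARAMS:
#   GCE_PARAMS = ('sa-name@my-project.iam.gserviceaccount.com', '/path/to/pkey.pem')
#   GCE_KEYWORD_PARAMS = {'project': 'my-project'}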
# If you are not going to use a 'secrets.py' file, you can set the necessary
# authorization parameters here.
# You can add multiple GCE projects by using a comma-separated list. Make
# sure that the service account used has permissions on those projects.
gce_service_account_email_address =
gce_service_account_pem_file_path =
gce_project_id =
gce_zone =
# Filter inventory based on state. Leave undefined to return instances regardless of state.
# example: Uncomment to only return inventory in the running or provisioning state
#instance_states = RUNNING,PROVISIONING
# Filter inventory based on instance tags. Leave undefined to return instances regardless of tags.
# example: Uncomment to only return inventory with the http-server or https-server tag
#instance_tags = http-server,https-server
[inventory]
# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
# contain the instance internal or external address. Values may be either
# 'internal' or 'external'. If 'external' is specified but no external instance
# address exists, the internal address will be used.
# The INVENTORY_IP_TYPE environment variable will override this value.
inventory_ip_type =
[cache]
# directory in which cache should be created
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300

@ -1,521 +0,0 @@
#!/usr/bin/env python
# Copyright: (c) 2013, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>,
John Roach <johnroach1985@gmail.com>
Version: 0.0.4
'''
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
import argparse
from time import time
from ansible.module_utils.six.moves import configparser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
import json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except Exception:
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
cache_max_age=300):
cache_dir = os.path.expanduser(cache_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = os.path.join(cache_dir, cache_name)
self.cache_max_age = cache_max_age
def is_valid(self, max_age=None):
''' Determines if the cache files have expired, or if it is still valid '''
if max_age is None:
max_age = self.cache_max_age
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + max_age) > current_time:
return True
return False
def get_all_data_from_cache(self, filename=''):
''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
data = ''
if not filename:
filename = self.cache_path_cache
with open(filename, 'r') as cache:
data = cache.read()
return json.loads(data)
def write_to_cache(self, data, filename=''):
''' Writes data to file as JSON. Returns True. '''
if not filename:
filename = self.cache_path_cache
json_data = json.dumps(data)
with open(filename, 'w') as cache:
cache.write(json_data)
return True
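# A minimal usage sketch of CloudInventoryCache (paths and ages illustrative):
#
#   cache = CloudInventoryCache(cache_name='ansible-gce.cache',
#                               cache_path='~/.ansible/tmp',
#                               cache_max_age=300)
#   if cache.is_valid():
#       inventory = cache.get_all_data_from_cache()
#   else:
#       inventory = {'_meta': {'hostvars': {}}}  # ...rebuilt from the API...
#       cache.write_to_cache(inventory)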
class GceInventory(object):
def __init__(self):
# Cache object
self.cache = None
# dictionary containing inventory read from disk
self.inventory = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.config = self.get_config()
self.drivers = self.get_gce_drivers()
self.ip_type = self.get_inventory_options()
if self.ip_type:
self.ip_type = self.ip_type.lower()
# Cache management
start_inventory_time = time()
cache_used = False
if self.args.refresh_cache or not self.cache.is_valid():
self.do_api_calls_update_cache()
else:
self.load_inventory_from_cache()
cache_used = True
self.inventory['_meta']['stats'] = {
'inventory_load_time': time() - start_inventory_time,
'cache_used': cache_used
}
# Just display data for specific host
if self.args.host:
print(self.json_format_dict(
self.inventory['_meta']['hostvars'][self.args.host],
pretty=self.args.pretty))
else:
# Otherwise, assume user wants all instances grouped
zones = self.parse_env_zones()
print(self.json_format_dict(self.inventory,
pretty=self.args.pretty))
sys.exit(0)
def get_config(self):
"""
Reads the settings from the gce.ini file.
Populates a ConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = configparser.ConfigParser(defaults={
'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'gce_zone': '',
'libcloud_secrets': '',
'instance_tags': '',
'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp',
'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
if 'inventory' not in config.sections():
config.add_section('inventory')
if 'cache' not in config.sections():
config.add_section('cache')
config.read(gce_ini_path)
#########
# Section added for processing ini settings
#########
# Set the instance_states filter based on config file options
self.instance_states = []
if config.has_option('gce', 'instance_states'):
states = config.get('gce', 'instance_states')
# Ignore if instance_states is an empty string.
if states:
self.instance_states = states.split(',')
# Set the instance_tags filter, env var overrides config from file
# and cli param overrides all
if self.args.instance_tags:
self.instance_tags = self.args.instance_tags
else:
self.instance_tags = os.environ.get(
'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
if self.instance_tags:
self.instance_tags = self.instance_tags.split(',')
# Caching
cache_path = config.get('cache', 'cache_path')
cache_max_age = config.getint('cache', 'cache_max_age')
# TODO(supertom): support project-specific caches
cache_name = 'ansible-gce.cache'
self.cache = CloudInventoryCache(cache_path=cache_path,
cache_max_age=cache_max_age,
cache_name=cache_name)
return config
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files."""
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type
def get_gce_drivers(self):
"""Determine the GCE authorization settings and return a list of
libcloud drivers.
"""
# Attempt to get GCE params from a configuration file, if one
# exists.
secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
args = list(secrets.GCE_PARAMS)
kwargs = secrets.GCE_KEYWORD_PARAMS
secrets_found = True
except Exception:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except Exception:
pass
if not secrets_found:
args = [
self.config.get('gce', 'gce_service_account_email_address'),
self.config.get('gce', 'gce_service_account_pem_file_path')
]
kwargs = {'project': self.config.get('gce', 'gce_project_id'),
'datacenter': self.config.get('gce', 'gce_zone')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
args[0] = os.environ.get('GCE_EMAIL', args[0])
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
gce_drivers = []
projects = kwargs['project'].split(',')
for project in projects:
kwargs['project'] = project
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
'%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
)
gce_drivers.append(gce)
return gce_drivers
def parse_env_zones(self):
'''Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
If provided, this list is used to filter the results of the group_instances call.'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]]
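# For example (illustrative value), GCE_ZONE="us-central1-a, europe-west1-b"
# parses to ['us-central1-a', 'europe-west1-b'].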
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--instance-tags', action='store',
help='Only include instances with these tags (comma-separated)')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
def node_to_dict(self, inst):
md = {}
if inst is None:
return {}
if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
subnet = None
if 'subnetwork' in inst.extra['networkInterfaces'][0]:
subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
# default to external IP unless the user has specified they prefer internal
if self.ip_type == 'internal':
ssh_host = inst.private_ips[0]
else:
ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
'gce_zone': inst.extra['zone'].name,
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
'gce_subnetwork': subnet,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ssh_host
}
def load_inventory_from_cache(self):
''' Loads inventory from JSON on disk. '''
try:
self.inventory = self.cache.get_all_data_from_cache()
hosts = self.inventory['_meta']['hostvars']
except Exception as e:
print(
"Invalid inventory file %s. Please rebuild with -refresh-cache option."
% (self.cache.cache_path_cache))
raise
def do_api_calls_update_cache(self):
''' Do API calls and save data in cache. '''
zones = self.parse_env_zones()
data = self.group_instances(zones)
self.cache.write_to_cache(data)
self.inventory = data
def list_nodes(self):
all_nodes = []
params, more_results = {'maxResults': 500}, True
while more_results:
for driver in self.drivers:
driver.connection.gce_params = params
all_nodes.extend(driver.list_nodes())
more_results = 'pageToken' in params
return all_nodes
def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
for node in self.list_nodes():
# This check filters on the desired instance states defined in the
# config file with the instance_states config option.
#
# If the instance_states list is _empty_ then _ALL_ states are returned.
#
# If the instance_states list is _populated_ then check the current
# state against the instance_states list
if self.instance_states and not node.extra['status'] in self.instance_states:
continue
# This check filters on the desired instance tags defined in the
# config file with the instance_tags config option, env var GCE_INSTANCE_TAGS,
# or as the cli param --instance-tags.
#
# If the instance_tags list is _empty_ then _ALL_ instances are returned.
#
# If the instance_tags list is _populated_ then check the current
# instance tags against the instance_tags list. If the instance has
# at least one tag from the instance_tags list, it is returned.
if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
continue
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
# To avoid making multiple requests per zone
# we list all nodes and then filter the results
if zones and zone not in zones:
continue
if zone in groups:
groups[zone].append(name)
else:
groups[zone] = [name]
tags = node.extra['tags']
for t in tags:
if t.startswith('group-'):
tag = t[6:]
else:
tag = 'tag_%s' % t
if tag in groups:
groups[tag].append(name)
else:
groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
if net in groups:
groups[net].append(name)
else:
groups[net] = [name]
machine_type = node.size
if machine_type in groups:
groups[machine_type].append(name)
else:
groups[machine_type] = [name]
image = node.image or 'persistent_disk'
if image in groups:
groups[image].append(name)
else:
groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
if stat in groups:
groups[stat].append(name)
else:
groups[stat] = [name]
for private_ip in node.private_ips:
groups[private_ip] = [name]
if len(node.public_ips) >= 1:
for public_ip in node.public_ips:
groups[public_ip] = [name]
groups["_meta"] = meta
return groups
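# A sketch of the structure group_instances() returns for a single
# hypothetical instance (all names and values illustrative, hostvars trimmed):
#
#   {
#    "us-central1-a": ["web01"],
#    "tag_http-server": ["web01"],
#    "network_default": ["web01"],
#    "n1-standard-1": ["web01"],
#    "status_running": ["web01"],
#    "persistent_disk": ["web01"],
#    "10.128.0.2": ["web01"],
#    "_meta": {"hostvars": {"web01": {}}}
#   }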
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
if __name__ == '__main__':
GceInventory()

@ -1,126 +0,0 @@
#!/usr/bin/env python
#
# (c) 2018, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import json
import argparse
from ansible.parsing.dataloader import DataLoader
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.module_utils.net_tools.nios.api import WapiInventory
from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
CONFIG_FILES = [
'/etc/ansible/infoblox.yaml',
'/etc/ansible/infoblox.yml'
]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true',
help='List host records from NIOS for use in Ansible')
parser.add_argument('--host',
help='List meta data about single host (not used)')
return parser.parse_args()
def main():
args = parse_args()
for config_file in CONFIG_FILES:
if os.path.exists(config_file):
break
else:
sys.stdout.write('unable to locate config file at /etc/ansible/infoblox.yaml\n')
sys.exit(-1)
try:
loader = DataLoader()
config = loader.load_from_file(config_file)
provider = config.get('provider') or {}
wapi = WapiInventory(provider)
except Exception as exc:
sys.stdout.write(to_text(exc))
sys.exit(-1)
if args.host:
host_filter = {'name': args.host}
else:
host_filter = {}
config_filters = config.get('filters') or {}  # tolerate a missing filters section
if config_filters.get('view') is not None:
host_filter['view'] = config_filters['view']
if config_filters.get('extattrs'):
extattrs = normalize_extattrs(config_filters['extattrs'])
else:
extattrs = {}
hostvars = {}
inventory = {
'_meta': {
'hostvars': hostvars
}
}
return_fields = ['name', 'view', 'extattrs', 'ipv4addrs']
hosts = wapi.get_object('record:host',
host_filter,
extattrs=extattrs,
return_fields=return_fields)
if hosts:
for item in hosts:
view = item['view']
name = item['name']
if view not in inventory:
inventory[view] = {'hosts': []}
inventory[view]['hosts'].append(name)
hostvars[name] = {
'view': view
}
if item.get('extattrs'):
for key, value in iteritems(flatten_extattrs(item['extattrs'])):
if key.startswith('ansible_'):
hostvars[name][key] = value
else:
if 'extattrs' not in hostvars[name]:
hostvars[name]['extattrs'] = {}
hostvars[name]['extattrs'][key] = value
sys.stdout.write(json.dumps(inventory, indent=4))
sys.exit(0)
if __name__ == '__main__':
main()

@ -1,24 +0,0 @@
---
# This file provides the configuration information for the Infoblox dynamic
# inventory script that is used to dynamically pull host information from NIOS.
# This file should be copied to /etc/ansible/infoblox.yaml in order for the
# dynamic script to find it.
# Sets the provider arguments for authenticating to the Infoblox server to
# retrieve inventory hosts. Provider arguments can also be set using
# environment variables. Supported environment variables all start with
# INFOBLOX_{{ name }}. For instance, to set the host provider value, the
# environment variable would be INFOBLOX_HOST.
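# For example (placeholder values, following the naming rule above):
#   INFOBLOX_HOST=nios01.example.com
#   INFOBLOX_USERNAME=admin
#   INFOBLOX_PASSWORD=secret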
provider:
host: <SERVER_IP>
username: <USERNAME>
password: <PASSWORD>
# Filters allow the dynamic inventory script to restrict the set of hosts that
# are returned from the Infoblox server.
filters:
# restrict returned hosts by extensible attributes
extattrs: {}
# restrict returned hosts to a specified DNS view
view: null

@ -1,37 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({'ansible_connection': 'jail'}))
else:
sys.stderr.write("Need an argument, either --list or --host <host>\n")
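# Example --list output for a system with two jails named 'www' and 'db'
# (names hypothetical):
#
#   {"all": {"hosts": ["www", "db"], "vars": {"ansible_connection": "jail"}}}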

@ -1,125 +0,0 @@
#!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Canonical's
# Landscape (http://www.ubuntu.com/management/landscape-features).
#
# Requires the `landscape_api` Python module
# See:
# - https://landscape.canonical.com/static/doc/api/api-client-package.html
# - https://landscape.canonical.com/static/doc/api/python-api.html
#
# Environment variables
# ---------------------
# - `LANDSCAPE_API_URI`
# - `LANDSCAPE_API_KEY`
# - `LANDSCAPE_API_SECRET`
# - `LANDSCAPE_API_SSL_CA_FILE` (optional)
import argparse
import collections
import os
import sys
from landscape_api.base import API, HTTPError
import json
_key = 'landscape'
class EnvironmentConfig(object):
uri = os.getenv('LANDSCAPE_API_URI')
access_key = os.getenv('LANDSCAPE_API_KEY')
secret_key = os.getenv('LANDSCAPE_API_SECRET')
ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE')
def _landscape_client():
env = EnvironmentConfig()
return API(
uri=env.uri,
access_key=env.access_key,
secret_key=env.secret_key,
ssl_ca_file=env.ssl_ca_file)
def get_landscape_members_data():
return _landscape_client().get_computers()
def get_nodes(data):
return [node['hostname'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for value in node['tags']:
groups[value].append(node['hostname'])
return groups
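# For example (hypothetical data), nodes 'h1' tagged ['web', 'prod'] and 'h2'
# tagged ['db', 'prod'] produce:
#   {'web': ['h1'], 'db': ['h2'], 'prod': ['h1', 'h2']}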
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['hostname']] = {'tags': node['tags']}
return meta
def print_list():
data = get_landscape_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
def print_host(host):
data = get_landscape_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from landscape cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from landscape cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])

@ -1,15 +0,0 @@
# Ansible Apache Libcloud Generic inventory script
[driver]
provider = CLOUDSTACK
host =
path =
secure = True
verify_ssl_cert = True
key =
secret =
[cache]
cache_path=/path/to/your/cache
cache_max_age=60

@ -1,37 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'libvirt_lxc'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({'ansible_connection': 'libvirt_lxc'}))
else:
sys.stderr.write("Need an argument, either --list or --host <host>\n")

@ -1,18 +0,0 @@
# Ansible Linode external inventory script settings
#
[linode]
# API calls to Linode are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-linode.cache
# - ansible-linode.index
cache_path = /tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 300
# If set to true, use the host's public IP in the dictionary instead of the label
use_public_ip = false

@ -1,348 +0,0 @@
#!/usr/bin/env python
'''
Linode external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
Linode using the Chube library.
NOTE: This script assumes Ansible is being executed where Chube is already
installed and has a valid config at ~/.chube. If not, run:
pip install chube
echo -e "---\napi_key: <YOUR API KEY GOES HERE>" > ~/.chube
For more details, see: https://github.com/exosite/chube
NOTE: By default, this script also assumes that the Linodes in your account all have
labels that correspond to hostnames that are in your resolver search path.
Your resolver search path is configured in /etc/resolv.conf.
Optionally, if you would like to use the host's public IP instead of its label, use
the following setting in linode.ini:
use_public_ip = true
When run against a specific host, this script returns the following variables:
- api_id
- datacenter_id
- datacenter_city (lowercase city name of data center, e.g. 'tokyo')
- label
- display_group
- create_dt
- total_hd
- total_xfer
- total_ram
- status
- public_ip (The first public IP found)
- private_ip (The first private IP found, or empty string if none)
- alert_cpu_enabled
- alert_cpu_threshold
- alert_diskio_enabled
- alert_diskio_threshold
- alert_bwin_enabled
- alert_bwin_threshold
- alert_bwout_enabled
- alert_bwout_threshold
- alert_bwquota_enabled
- alert_bwquota_threshold
- backup_weekly_daily
- backup_window
- watchdog
Peter Sankauskas did most of the legwork here with his EC2 plugin; I
just adapted it for Linode.
'''
# (c) 2013, Dan Slimmon
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
# Standard imports
import os
import re
import sys
import argparse
from time import time
import json
try:
from chube import load_chube_config
from chube import api as chube_api
from chube.datacenter import Datacenter
from chube.linode_obj import Linode
except Exception:
try:
# remove local paths and other stuff that may
# cause an import conflict, as chube is sensitive
# to name collisions on importing
old_path = sys.path
sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))]
from chube import load_chube_config
from chube import api as chube_api
from chube.datacenter import Datacenter
from chube.linode_obj import Linode
sys.path = old_path
except Exception as e:
raise Exception("could not import chube")
load_chube_config()
# Imports for ansible
from ansible.module_utils.six.moves import configparser as ConfigParser
class LinodeInventory(object):
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
"""Main execution path."""
# Inventory grouped by display group
self.inventory = self._empty_inventory()
# Index of label to Linode ID
self.index = {}
# Local cache of Datacenter objects populated by populate_datacenter_cache()
self._datacenter_cache = None
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of nodes for inventory
if len(self.inventory) == 1:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
"""Determines if the cache file has expired, or if it is still valid."""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
"""Reads the settings from the .ini file."""
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')
# Cache related
cache_path = config.get('linode', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-linode.cache"
self.cache_path_index = cache_path + "/ansible-linode.index"
self.cache_max_age = config.getint('linode', 'cache_max_age')
self.use_public_ip = config.getboolean('linode', 'use_public_ip')
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode')
parser.add_argument('--list', action='store_true', default=True,
help='List nodes (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific node')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Linode (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
"""Do API calls, and save data in cache files."""
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_nodes(self):
"""Makes an Linode API call to get the list of nodes."""
try:
for node in Linode.search(status=Linode.STATUS_RUNNING):
self.add_node(node)
except chube_api.linode_api.ApiError as e:
sys.exit("Looks like Linode's API is down:\n %s" % e)
def get_node(self, linode_id):
"""Gets details about a specific node."""
try:
return Linode.find(api_id=linode_id)
except chube_api.linode_api.ApiError as e:
sys.exit("Looks like Linode's API is down:\n%s" % e)
def populate_datacenter_cache(self):
"""Creates self._datacenter_cache, containing all Datacenters indexed by ID."""
self._datacenter_cache = {}
dcs = Datacenter.search()
for dc in dcs:
self._datacenter_cache[dc.api_id] = dc
def get_datacenter_city(self, node):
"""Returns a the lowercase city name of the node's data center."""
if self._datacenter_cache is None:
self.populate_datacenter_cache()
location = self._datacenter_cache[node.datacenter_id].location
location = location.lower()
location = location.split(",")[0]
return location
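# For example, a datacenter whose location reads "Tokyo, JP" (illustrative)
# yields the group name 'tokyo'.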
def add_node(self, node):
"""Adds an node to the inventory and index."""
if self.use_public_ip:
dest = self.get_node_public_ip(node)
else:
dest = node.label
# Add to index
self.index[dest] = node.api_id
# Inventory: Group by node ID (always a group of 1)
self.inventory[node.api_id] = [dest]
# Inventory: Group by datacenter city
self.push(self.inventory, self.get_datacenter_city(node), dest)
# Inventory: Group by display group
self.push(self.inventory, node.display_group, dest)
# Inventory: Add a "linode" global tag group
self.push(self.inventory, "linode", dest)
# Add host info to hostvars
self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node)
def get_node_public_ip(self, node):
"""Returns a the public IP address of the node"""
return [addr.address for addr in node.ipaddresses if addr.is_public][0]
def get_host_info(self):
"""Get variables about a specific host."""
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
node_id = self.index[self.args.host]
node = self.get_node(node_id)
return self.json_format_dict(self._get_host_info(node), True)
def _get_host_info(self, node):
node_vars = {}
for direct_attr in [
"api_id",
"datacenter_id",
"label",
"display_group",
"create_dt",
"total_hd",
"total_xfer",
"total_ram",
"status",
"alert_cpu_enabled",
"alert_cpu_threshold",
"alert_diskio_enabled",
"alert_diskio_threshold",
"alert_bwin_enabled",
"alert_bwin_threshold",
"alert_bwout_enabled",
"alert_bwout_threshold",
"alert_bwquota_enabled",
"alert_bwquota_threshold",
"backup_weekly_daily",
"backup_window",
"watchdog"
]:
node_vars[direct_attr] = getattr(node, direct_attr)
node_vars["datacenter_city"] = self.get_datacenter_city(node)
node_vars["public_ip"] = self.get_node_public_ip(node)
# Set the SSH host information, so these inventory items can be used if
# their labels aren't FQDNs
node_vars['ansible_ssh_host'] = node_vars["public_ip"]
node_vars['ansible_host'] = node_vars["public_ip"]
private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public]
if private_ips:
node_vars["private_ip"] = private_ips[0]
return node_vars
def push(self, my_dict, key, element):
"""Pushed an element onto an array that may not have been defined in the dict."""
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
"""Reads the inventory from the cache file and returns it as a JSON object."""
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
"""Reads the index from the cache file and sets self.index."""
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
"""Writes data in JSON format to a file."""
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
"""Escapes any characters that would be invalid in an ansible group name."""
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string."""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
LinodeInventory()

@ -1,70 +0,0 @@
#!/usr/bin/env python
#
# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH
# <florian@hastexo.com>
# Based in part on:
# libvirt_lxc.py, (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Ansible inventory script for LXC containers. Requires Python
bindings for LXC API.
In LXC, containers can be grouped by setting the lxc.group option,
which may be found more than once in a container's
configuration. So, we enumerate all containers, fetch their list
of groups, and then build the dictionary in the way Ansible expects
it.
"""
from __future__ import print_function
import sys
import lxc
import json
def build_dict():
"""Returns a dictionary keyed to the defined LXC groups. All
containers, including the ones not in any group, are included in the
"all" group."""
# Enumerate all containers, and list the groups they are in. Also,
# implicitly add every container to the 'all' group.
containers = dict([(c,
['all'] +
(lxc.Container(c).get_config_item('lxc.group') or []))
for c in lxc.list_containers()])
# Extract the groups, flatten the list, and remove duplicates
groups = set(sum([g for g in containers.values()], []))
# Create a dictionary for each group (including the 'all' group)
return dict([(g, {'hosts': [k for k, v in containers.items() if g in v],
'vars': {'ansible_connection': 'lxc'}}) for g in groups])
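# For example (hypothetical containers), 'c1' with lxc.group = web and 'c2'
# with no group produce:
#
#   {'all': {'hosts': ['c1', 'c2'], 'vars': {'ansible_connection': 'lxc'}},
#    'web': {'hosts': ['c1'], 'vars': {'ansible_connection': 'lxc'}}}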
def main(argv):
"""Returns a JSON dictionary as expected by Ansible"""
result = build_dict()
if len(argv) == 2 and argv[1] == '--list':
json.dump(result, sys.stdout)
elif len(argv) == 3 and argv[1] == '--host':
json.dump({'ansible_connection': 'lxc'}, sys.stdout)
else:
print("Need an argument, either --list or --host <host>", file=sys.stderr)
if __name__ == '__main__':
main(sys.argv)

@ -1,13 +0,0 @@
# LXD external inventory script settings
[lxd]
# The default resource
#resource = local:
# The group name to add the hosts to
#group = lxd
# The connection type to return for these hosts - lxd hasn't been tested yet
#connection = lxd
connection = smart

@ -1,101 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2014, Hiroaki Nakamura <hnakamur@gmail.com>
# (c) 2016, Andew Clarke <andrew@oscailte.org>
#
# This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible,
# and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py
#
# NOTE: this file has some obvious limitations; improvements welcome
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
from subprocess import Popen, PIPE
import distutils.spawn
import sys
import json
from ansible.module_utils.six.moves import configparser
# Set up defaults
resource = 'local:'
group = 'lxd'
connection = 'lxd'
hosts = {}
result = {}
# Read the settings from the lxd.ini file
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini')
if config.has_option('lxd', 'resource'):
resource = config.get('lxd', 'resource')
if config.has_option('lxd', 'group'):
group = config.get('lxd', 'group')
if config.has_option('lxd', 'connection'):
connection = config.get('lxd', 'connection')
# Ensure executable exists
if distutils.spawn.find_executable('lxc'):
# Set up containers result and hosts array
result[group] = {}
result[group]['hosts'] = []
# Run the command and load json result
pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True)
lxdjson = json.load(pipe.stdout)
# Iterate the json lxd output
for item in lxdjson:
# Check state and network
if 'state' in item and item['state'] is not None and 'network' in item['state']:
network = item['state']['network']
# Check for eth0 and addresses
if 'eth0' in network and 'addresses' in network['eth0']:
addresses = network['eth0']['addresses']
# Iterate addresses
for address in addresses:
# Only return inet family addresses
if 'family' in address and address['family'] == 'inet':
if 'address' in address:
ip = address['address']
name = item['name']
# Add the host to the results and the host array
result[group]['hosts'].append(name)
hosts[name] = ip
# Set the other containers result values
result[group]['vars'] = {}
result[group]['vars']['ansible_connection'] = connection
# Process arguments
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
if sys.argv[2] == 'localhost':
print(json.dumps({'ansible_connection': 'local'}))
else:
if connection == 'lxd':
print(json.dumps({'ansible_connection': connection}))
else:
print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]}))
else:
print("Need an argument, either --list or --host <host>")

@ -1,17 +0,0 @@
[mdt]
# Set the MDT server to connect to
server = localhost.example.com
# Set the MDT Instance
instance = EXAMPLEINSTANCE
# Set the MDT database
database = MDTDB
# Configure login credentials
user = local.domain\admin
password = adminpassword
[tower]
groupname = mdt

@ -1,132 +0,0 @@
#!/usr/bin/env python
# (c) 2016, Julian Barnett <jbarnett@tableau.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
MDT external inventory script
=================================
author: J Barnett 06/23/2016 01:15
maintainer: J Barnett (github @jbarnett1981)
'''
import argparse
import json
import pymssql
from ansible.module_utils.six.moves import configparser
class MDTInventory(object):
def __init__(self):
''' Main execution path '''
self.conn = None
# Initialize empty inventory
self.inventory = self._empty_inventory()
# Read CLI arguments
self.read_settings()
self.parse_cli_args()
# Get Hosts
if self.args.list:
self.get_hosts()
# Get specific host vars
if self.args.host:
self.get_hosts(self.args.host)
def _connect(self, query):
'''
Connect to MDT and dump the contents of the dbo.ComputerIdentity table
'''
if not self.conn:
self.conn = pymssql.connect(server=self.mdt_server + "\\" + self.mdt_instance, user=self.mdt_user, password=self.mdt_password,
database=self.mdt_database)
cursor = self.conn.cursor()
cursor.execute(query)
self.mdt_dump = cursor.fetchall()
self.conn.close()
def get_hosts(self, hostname=False):
'''
Gets hosts from the MDT database
'''
if hostname:
query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role "
"FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID where t1.Description = '%s'" % hostname)
else:
query = 'SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID'
self._connect(query)
# Configure to group name configured in Ansible Tower for this inventory
groupname = self.mdt_groupname
# Initialize empty host list
hostlist = []
# Parse through db dump and populate inventory
for hosts in self.mdt_dump:
self.inventory['_meta']['hostvars'][hosts[1]] = {'id': hosts[0], 'name': hosts[1], 'mac': hosts[2], 'role': hosts[3]}
hostlist.append(hosts[1])
self.inventory[groupname] = hostlist
# Print it all out
print(json.dumps(self.inventory, indent=2))
def _empty_inventory(self):
'''
Create empty inventory dictionary
'''
return {"_meta": {"hostvars": {}}}
def read_settings(self):
'''
Reads the settings from the mdt.ini file
'''
config = configparser.SafeConfigParser()
config.read('mdt.ini')
# MDT Server and instance and database
self.mdt_server = config.get('mdt', 'server')
self.mdt_instance = config.get('mdt', 'instance')
self.mdt_database = config.get('mdt', 'database')
# MDT Login credentials
if config.has_option('mdt', 'user'):
self.mdt_user = config.get('mdt', 'user')
if config.has_option('mdt', 'password'):
self.mdt_password = config.get('mdt', 'password')
# Group name in Tower
if config.has_option('tower', 'groupname'):
self.mdt_groupname = config.get('tower', 'groupname')
def parse_cli_args(self):
'''
Command line argument processing
'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on MDT')
parser.add_argument('--list', action='store_true', default=False, help='List instances')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
if __name__ == "__main__":
# Run the script
MDTInventory()

@ -1,41 +0,0 @@
# Ansible Nagios external inventory script settings
#
# To get all available possibilities, check the following URLs:
# http://www.naemon.org/documentation/usersguide/livestatus.html
# https://mathias-kettner.de/checkmk_livestatus.html
#
[local]
# Livestatus URI
# Example for default naemon livestatus unix socket :
# livestatus_uri=unix:/var/cache/naemon/live
[remote]
# default field name for host: name
# Uncomment to override:
# host_field=address
#
# default field group for host: groups
# Uncomment to override:
# group_field=state
# default fields retrieved: address, alias, display_name, childs, parents
# To override, uncomment the following line
# fields_to_retrieve=address,alias,display_name
#
# default variable prefix: livestatus_
# To override, uncomment the following line
# var_prefix=naemon_
#
# default filter: None
#
# Uncomment to override
#
# All hosts with state = OK
# host_filter=state = 0
# Warning: for the moment, you can use only one filter at a time. You cannot combine various conditions.
#
# All hosts in group Linux
# host_filter=groups >= Linux
#
livestatus_uri=tcp:192.168.66.137:6557

@ -1,173 +0,0 @@
#!/usr/bin/env python
# (c) 2015, Yannig Perre <yannig.perre@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
Nagios livestatus inventory script. Before using this script, please
update nagios_livestatus.ini file.
Livestatus is a Nagios/Naemon/Shinken module which lets you retrieve
information stored in the monitoring core.
This inventory script needs the livestatus API for Python. Please install it
before using this script (apt/pip/yum/...).
Checkmk livestatus: https://mathias-kettner.de/checkmk_livestatus.html
Livestatus API: http://www.naemon.org/documentation/usersguide/livestatus.html
'''
import os
import re
import argparse
import sys
from ansible.module_utils.six.moves import configparser
import json
try:
from mk_livestatus import Socket
except ImportError:
sys.exit("Error: mk_livestatus is needed. Try something like: pip install python-mk-livestatus")
class NagiosLivestatusInventory(object):
def parse_ini_file(self):
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_livestatus.ini')
for section in config.sections():
if not config.has_option(section, 'livestatus_uri'):
continue
# If fields_to_retrieve is not set, using default fields
fields_to_retrieve = self.default_fields_to_retrieve
if config.has_option(section, 'fields_to_retrieve'):
fields_to_retrieve = [field.strip() for field in config.get(section, 'fields_to_retrieve').split(',')]
fields_to_retrieve = tuple(fields_to_retrieve)
# default section values
section_values = {
'var_prefix': 'livestatus_',
'host_filter': None,
'host_field': 'name',
'group_field': 'groups'
}
for key, value in section_values.items():
if config.has_option(section, key):
section_values[key] = config.get(section, key).strip()
# Retrieve the livestatus connection string
livestatus_uri = config.get(section, 'livestatus_uri')
backend_definition = None
# Local unix socket
unix_match = re.match('unix:(.*)', livestatus_uri)
if unix_match is not None:
backend_definition = {'connection': unix_match.group(1)}
# Remote tcp connection
tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri)
if tcp_match is not None:
backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))}
# No valid livestatus_uri => exiting
if backend_definition is None:
raise Exception('livestatus_uri field is invalid (%s). Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri)
# Update backend_definition with the values from this section
backend_definition['name'] = section
backend_definition['fields'] = fields_to_retrieve
for key, value in section_values.items():
backend_definition[key] = value
self.backends.append(backend_definition)
def parse_options(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
parser.add_argument('--pretty', action='store_true')
self.options = parser.parse_args()
def add_host(self, hostname, group):
if group not in self.result:
self.result[group] = {}
self.result[group]['hosts'] = []
if hostname not in self.result[group]['hosts']:
self.result[group]['hosts'].append(hostname)
def query_backend(self, backend, host=None):
'''Query a livestatus backend'''
hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field'])
if backend['host_filter'] is not None:
hosts_request = hosts_request.filter(backend['host_filter'])
if host is not None:
hosts_request = hosts_request.filter('name = ' + host[0])
hosts_request._columns += backend['fields']
hosts = hosts_request.call()
for host in hosts:
hostname = host[backend['host_field']]
hostgroups = host[backend['group_field']]
if not isinstance(hostgroups, list):
hostgroups = [hostgroups]
self.add_host(hostname, 'all')
self.add_host(hostname, backend['name'])
for group in hostgroups:
self.add_host(hostname, group)
for field in backend['fields']:
var_name = backend['var_prefix'] + field
if hostname not in self.result['_meta']['hostvars']:
self.result['_meta']['hostvars'][hostname] = {}
self.result['_meta']['hostvars'][hostname][var_name] = host[field]
def __init__(self):
self.defaultgroup = 'group_all'
self.default_fields_to_retrieve = ('address', 'alias', 'display_name', 'childs', 'parents')
self.backends = []
self.options = None
self.parse_ini_file()
self.parse_options()
self.result = {}
self.result['_meta'] = {}
self.result['_meta']['hostvars'] = {}
self.json_indent = None
if self.options.pretty:
self.json_indent = 2
if len(self.backends) == 0:
sys.exit("Error: Livestatus configuration is missing. See nagios_livestatus.ini.")
for backend in self.backends:
self.query_backend(backend, self.options.host)
if self.options.host:
print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent))
elif self.options.list:
print(json.dumps(self.result, indent=self.json_indent))
else:
sys.exit("usage: --list or --host HOSTNAME [--pretty]")
NagiosLivestatusInventory()

@ -1,10 +0,0 @@
# Ansible Nagios external inventory script settings
#
[ndo]
# NDO database URI
# Make sure that data is returned as strings and not bytes if using python 3.
# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html
# for supported databases and URI format.
# Example for the mysqlclient module:
database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1
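# Example for the psycopg2 module (illustrative values; any database URI
# supported by SQLAlchemy should work):
# database_uri=postgresql+psycopg2://user:passwd@hostname/ndo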

@ -1,105 +0,0 @@
#!/usr/bin/env python
# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Nagios NDO external inventory script.
========================================
Returns hosts and hostgroups from Nagios NDO.
Configuration is read from `nagios_ndo.ini`.
"""
import os
import argparse
import sys
from ansible.module_utils.six.moves import configparser
import json
try:
from sqlalchemy import text
from sqlalchemy.engine import create_engine
except ImportError:
sys.exit("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
class NagiosNDOInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
if config.has_option('ndo', 'database_uri'):
self.ndo_database_uri = config.get('ndo', 'database_uri')
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def get_hosts(self):
engine = create_engine(self.ndo_database_uri)
connection = engine.connect()
select_hosts = text("SELECT display_name \
FROM nagios_hosts")
select_hostgroups = text("SELECT alias \
FROM nagios_hostgroups")
select_hostgroup_hosts = text("SELECT h.display_name \
FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
WHERE hgm.hostgroup_id = hg.hostgroup_id \
AND hgm.host_object_id = h.host_object_id \
AND hg.alias =:hostgroup_alias")
hosts = connection.execute(select_hosts)
self.result['all']['hosts'] = [host['display_name'] for host in hosts]
for hostgroup in connection.execute(select_hostgroups):
hostgroup_alias = hostgroup['alias']
self.result[hostgroup_alias] = {}
hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
def __init__(self):
self.defaultgroup = 'group_all'
self.ndo_database_uri = None
self.options = None
self.read_settings()
self.read_cli()
self.result = {}
self.result['all'] = {}
self.result['all']['hosts'] = []
self.result['_meta'] = {}
self.result['_meta']['hostvars'] = {}
if self.ndo_database_uri:
self.get_hosts()
if self.options.host:
print(json.dumps({}))
elif self.options.list:
print(json.dumps(self.result))
else:
sys.exit("usage: --list or --host HOSTNAME")
else:
sys.exit("Error: Database configuration is missing. See nagios_ndo.ini.")
NagiosNDOInventory()

@ -1,344 +0,0 @@
#!/usr/bin/env python
'''
nsot
====
Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
Features
--------
* Define host groups in the form of NSoT device attribute criteria
* All parameters defined by the spec as of 2015-09-05 are supported.
+ ``--list``: Returns JSON hash of host groups -> hosts and top-level
``_meta`` -> ``hostvars`` which correspond to all device attributes.
Group vars can be specified in the YAML configuration, noted below.
+ ``--host <hostname>``: Returns JSON hash where every item is a device
attribute.
* In addition to all attributes assigned to the resource being returned, the
  script will also append ``site_id`` and ``id`` as facts to utilize.
Configuration
-------------
Since it'd be annoying and failure-prone to guess where your configuration
file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
This file should adhere to the YAML spec. Each top-level variable must be the
desired Ansible group name, hashed with a single 'query' item that defines the
NSoT attribute query.
Queries follow the normal NSoT query syntax, `shown here`_
.. _shown here: https://github.com/dropbox/pynsot#set-queries
.. code:: yaml
routers:
query: 'deviceType=ROUTER'
vars:
a: b
c: d
juniper_fw:
query: 'deviceType=FIREWALL manufacturer=JUNIPER'
not_f10:
query: '-manufacturer=FORCE10'
The inventory will automatically use your ``.pynsotrc`` like normal pynsot from
cli would, so make sure that's configured appropriately.
.. note::
Attributes I'm showing above are influenced by ones that the Trigger
project likes. As is the spirit of NSoT, use whichever attributes work best
for your workflow.
If the config file is blank or absent, the following default groups will be
created:
* ``routers``: deviceType=ROUTER
* ``switches``: deviceType=SWITCH
* ``firewalls``: deviceType=FIREWALL
These are likely not useful for everyone so please use the configuration. :)
.. note::
By default, resources will only be returned for the default site
configured in your ``~/.pynsotrc``.
If you want a group to target a different site, add an extra ``site: n``
key under that group, as in the sketch below.
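A minimal sketch of a group pinned to a site (group name and site id are
illustrative):

.. code:: yaml

    dc2_routers:
        query: 'deviceType=ROUTER'
        site: 2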
Output Examples
---------------
Here are some examples from calling the command directly::
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.'
{
"routers": {
"hosts": [
"test1.example.com"
],
"vars": {
"cool_level": "very",
"group": "routers"
}
},
"firewalls": {
"hosts": [
"test2.example.com"
],
"vars": {
"cool_level": "enough",
"group": "firewalls"
}
},
"_meta": {
"hostvars": {
"test2.example.com": {
"make": "SRX",
"site_id": 1,
"id": 108
},
"test1.example.com": {
"make": "MX80",
"site_id": 1,
"id": 107
}
}
},
"rtr_and_fw": {
"hosts": [
"test1.example.com",
"test2.example.com"
],
"vars": {}
}
}
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.'
{
"make": "MX80",
"site_id": 1,
"id": 107
}
'''
from __future__ import print_function
import sys
import os
import pkg_resources
import argparse
import json
import yaml
from textwrap import dedent
from pynsot.client import get_api_client
from pynsot.app import HttpServerError
from click.exceptions import UsageError
from ansible.module_utils.six import string_types
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
class NSoTInventory(object):
'''NSoT Client object for gather inventory'''
def __init__(self):
self.config = dict()
config_env = os.environ.get('NSOT_INVENTORY_CONFIG')
        if config_env:
            config_file = os.path.abspath(config_env)
            # It is open(), not abspath(), that raises IOError for a missing
            # file, so the default-config fallback wraps the open call
            try:
                with open(config_file) as f:
                    try:
                        self.config.update(yaml.safe_load(f))
                    except TypeError:  # If empty file, use default config
                        warning('Empty config file')
                        self._config_default()
                    except Exception as e:
                        sys.exit('%s\n' % e)
            except IOError:  # If file non-existent, use default config
                self._config_default()
else: # Use defaults if env var missing
self._config_default()
self.groups = self.config.keys()
self.client = get_api_client()
self._meta = {'hostvars': dict()}
def _config_default(self):
default_yaml = '''
---
routers:
query: deviceType=ROUTER
switches:
query: deviceType=SWITCH
firewalls:
query: deviceType=FIREWALL
'''
self.config = yaml.safe_load(dedent(default_yaml))
def do_list(self):
'''Direct callback for when ``--list`` is provided
Relies on the configuration generated from init to run
_inventory_group()
'''
inventory = dict()
for group, contents in self.config.items():
group_response = self._inventory_group(group, contents)
inventory.update(group_response)
inventory.update({'_meta': self._meta})
return json.dumps(inventory)
def do_host(self, host):
return json.dumps(self._hostvars(host))
def _hostvars(self, host):
'''Return dictionary of all device attributes
Depending on number of devices in NSoT, could be rather slow since this
has to request every device resource to filter through
'''
device = [i for i in self.client.devices.get()
if host in i['hostname']][0]
attributes = device['attributes']
attributes.update({'site_id': device['site_id'], 'id': device['id']})
return attributes
def _inventory_group(self, group, contents):
'''Takes a group and returns inventory for it as dict
:param group: Group name
:type group: str
:param contents: The contents of the group's YAML config
:type contents: dict
contents param should look like::
{
'query': 'xx',
'vars':
'a': 'b'
}
Will return something like::
{ group: {
hosts: [],
vars: {},
}
'''
query = contents.get('query')
hostvars = contents.get('vars', dict())
site = contents.get('site', dict())
obj = {group: dict()}
obj[group]['hosts'] = []
obj[group]['vars'] = hostvars
try:
assert isinstance(query, string_types)
except Exception:
sys.exit('ERR: Group queries must be a single string\n'
' Group: %s\n'
' Query: %s\n' % (group, query)
)
try:
if site:
site = self.client.sites(site)
devices = site.devices.query.get(query=query)
else:
devices = self.client.devices.query.get(query=query)
except HttpServerError as e:
if '500' in str(e.response):
_site = 'Correct site id?'
_attr = 'Queried attributes actually exist?'
questions = _site + '\n' + _attr
sys.exit('ERR: 500 from server.\n%s' % questions)
else:
raise
except UsageError:
sys.exit('ERR: Could not connect to server. Running?')
# Would do a list comprehension here, but would like to save code/time
# and also acquire attributes in this step
for host in devices:
# Iterate through each device that matches query, assign hostname
# to the group's hosts array and then use this single iteration as
# a chance to update self._meta which will be used in the final
# return
hostname = host['hostname']
obj[group]['hosts'].append(hostname)
attributes = host['attributes']
attributes.update({'site_id': host['site_id'], 'id': host['id']})
self._meta['hostvars'].update({hostname: attributes})
return obj
def parse_args():
desc = __doc__.splitlines()[4] # Just to avoid being redundant
# Establish parser with options and error out if no action provided
parser = argparse.ArgumentParser(
description=desc,
conflict_handler='resolve',
)
# Arguments
#
# Currently accepting (--list | -l) and (--host | -h)
# These must not be allowed together
parser.add_argument(
'--list', '-l',
help='Print JSON object containing hosts to STDOUT',
action='store_true',
dest='list_', # Avoiding syntax highlighting for list
)
parser.add_argument(
'--host', '-h',
help='Print JSON object containing hostvars for <host>',
action='store',
)
args = parser.parse_args()
if not args.list_ and not args.host: # Require at least one option
parser.exit(status=1, message='No action requested')
if args.list_ and args.host: # Do not allow multiple options
parser.exit(status=1, message='Too many actions requested')
return args
def main():
'''Set up argument handling and callback routing'''
args = parse_args()
client = NSoTInventory()
# Callback condition
if args.list_:
print(client.do_list())
elif args.host:
print(client.do_host(args.host))
if __name__ == '__main__':
main()

@ -1,22 +0,0 @@
---
juniper_routers:
query: 'deviceType=ROUTER manufacturer=JUNIPER'
vars:
group: juniper_routers
netconf: true
os: junos
cisco_asa:
query: 'manufacturer=CISCO deviceType=FIREWALL'
vars:
group: cisco_asa
routed_vpn: false
stateful: true
old_cisco_asa:
query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+'
vars:
old_nat: true
not_f10:
query: '-manufacturer=FORCE10'

@ -1,100 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: openshift
short_description: Openshift gears external inventory script
description:
- Generates inventory of OpenShift gears using the REST interface
- This permits reusing playbooks to set up an OpenShift gear
version_added: None
author: Michael Scherer
'''
import json
import os
import os.path
import sys
from ansible.module_utils.six import StringIO
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves import configparser as ConfigParser
configparser = None
def get_from_rhc_config(variable):
global configparser
CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
if os.path.exists(CONF_FILE):
if not configparser:
ini_str = '[root]\n' + open(CONF_FILE, 'r').read()
configparser = ConfigParser.SafeConfigParser()
            configparser.readfp(StringIO(ini_str))
try:
return configparser.get('root', variable)
except ConfigParser.NoOptionError:
return None
def get_config(env_var, config_var):
result = os.getenv(env_var)
if not result:
result = get_from_rhc_config(config_var)
if not result:
sys.exit("failed=True msg='missing %s'" % env_var)
return result
def get_json_from_api(url, username, password):
headers = {'Accept': 'application/json; version=1.5'}
response = open_url(url, headers=headers, url_username=username, url_password=password)
return json.loads(response.read())['data']
username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
response = get_json_from_api(broker_url + '/domains', username, password)
response = get_json_from_api("%s/domains/%s/applications" %
(broker_url, response[0]['id']), username, password)
result = {}
for app in response:
# ssh://520311404832ce3e570000ff@blog-johndoe.example.org
(user, host) = app['ssh_url'][6:].split('@')
app_name = host.split('-')[0]
result[app_name] = {}
result[app_name]['hosts'] = []
result[app_name]['hosts'].append(host)
result[app_name]['vars'] = {}
result[app_name]['vars']['ansible_ssh_user'] = user
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({}))
else:
print("Need an argument, either --list or --host <host>")

@ -1,24 +0,0 @@
clouds:
vexxhost:
profile: vexxhost
auth:
project_name: 39e296b2-fc96-42bf-8091-cb742fa13da9
username: fb886a9b-c37b-442a-9be3-964bed961e04
password: fantastic-password1
rax:
profile: rackspace
auth:
username: example
password: spectacular-password
project_id: 2352426
region_name: DFW,ORD,IAD
devstack:
auth:
auth_url: https://devstack.example.com
username: stack
password: stack
project_name: stack
ansible:
use_hostnames: True
expand_hostvars: False
fail_on_errors: True

@ -1,272 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2016, Rackspace Australia
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# The OpenStack Inventory module uses os-client-config for configuration.
# https://github.com/openstack/os-client-config
# This means it will either:
# - Respect normal OS_* environment variables like other OpenStack tools
# - Read values from a clouds.yaml file.
# If you want to configure via clouds.yaml, you can put the file in:
# - Current directory
# - ~/.config/openstack/clouds.yaml
# - /etc/openstack/clouds.yaml
# - /etc/ansible/openstack.yml
# The clouds.yaml file can contain entries for multiple clouds and multiple
# regions of those clouds. If it does, this inventory module will by default
# connect to all of them and present them as one contiguous inventory. You
# can limit to one cloud by passing the `--cloud` parameter, or use the
# OS_CLOUD environment variable. If caching is enabled, and a cloud is
# selected, then per-cloud cache folders will be used.
#
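# For example (assuming this script is saved as openstack_inventory.py and the
# cloud names match entries in clouds.yaml):
#   OS_CLOUD=vexxhost ./openstack_inventory.py --list
#   ./openstack_inventory.py --cloud devstack --list
#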
# See the adjacent openstack.yml file for an example config file
# There are two ansible inventory specific options that can be set in
# the inventory section.
# expand_hostvars controls whether or not the inventory will make extra API
# calls to fill out additional information about each server
# use_hostnames changes the behavior from registering every host with its UUID
#                (and creating a group named after its hostname) to registering
#                hosts by hostname, keeping the UUID form only when a hostname
#                maps to more than one server
# fail_on_errors causes the inventory to fail and return no hosts if one cloud
# has failed (for example, bad credentials or being offline).
# When set to False, the inventory will return hosts from
# whichever other clouds it can contact. (Default: True)
#
# It is also possible to set the user Ansible connects with by adding an
# ansible_user: $myuser metadata attribute to the server.
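# For example, with the standard python-openstackclient CLI (the server name
# is illustrative):
#   openstack server set --property ansible_user=ubuntu my-server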
import argparse
import collections
import os
import sys
import time
from distutils.version import StrictVersion
from io import StringIO
import json
import openstack as sdk
from openstack.cloud import inventory as sdk_inventory
from openstack.config import loader as cloud_config
CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
def get_groups_from_server(server_vars, namegroup=True):
groups = []
region = server_vars['region']
cloud = server_vars['cloud']
metadata = server_vars.get('metadata', {})
# Create a group for the cloud
groups.append(cloud)
# Create a group on region
if region:
groups.append(region)
# And one by cloud_region
groups.append("%s_%s" % (cloud, region))
# Check if group metadata key in servers' metadata
if 'group' in metadata:
groups.append(metadata['group'])
for extra_group in metadata.get('groups', '').split(','):
if extra_group:
groups.append(extra_group.strip())
groups.append('instance-%s' % server_vars['id'])
if namegroup:
groups.append(server_vars['name'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
groups.append('%s-%s' % (key, server_vars[key]['name']))
for key, value in iter(metadata.items()):
groups.append('meta-%s_%s' % (key, value))
az = server_vars.get('az', None)
if az:
# Make groups for az, region_az and cloud_region_az
groups.append(az)
groups.append('%s_%s' % (region, az))
groups.append('%s_%s_%s' % (cloud, region, az))
return groups
def get_host_groups(inventory, refresh=False, cloud=None):
(cache_file, cache_expiration_time) = get_cache_settings(cloud)
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory))
with open(cache_file, 'w') as f:
f.write(groups)
else:
with open(cache_file, 'r') as f:
groups = f.read()
return groups
def append_hostvars(hostvars, groups, key, server, namegroup=False):
hostvars[key] = dict(
ansible_ssh_host=server['interface_ip'],
ansible_host=server['interface_ip'],
openstack=server)
metadata = server.get('metadata', {})
if 'ansible_user' in metadata:
hostvars[key]['ansible_user'] = metadata['ansible_user']
for group in get_groups_from_server(server, namegroup=namegroup):
groups[group].append(key)
def get_host_groups_from_cloud(inventory):
groups = collections.defaultdict(list)
firstpass = collections.defaultdict(list)
hostvars = {}
list_args = {}
if hasattr(inventory, 'extra_config'):
use_hostnames = inventory.extra_config['use_hostnames']
list_args['expand'] = inventory.extra_config['expand_hostvars']
if StrictVersion(sdk.version.__version__) >= StrictVersion("0.13.0"):
list_args['fail_on_cloud_config'] = \
inventory.extra_config['fail_on_errors']
else:
use_hostnames = False
for server in inventory.list_hosts(**list_args):
if 'interface_ip' not in server:
continue
firstpass[server['name']].append(server)
for name, servers in firstpass.items():
if len(servers) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
server_ids = set()
# Trap for duplicate results
for server in servers:
server_ids.add(server['id'])
if len(server_ids) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
for server in servers:
append_hostvars(
hostvars, groups, server['id'], server,
namegroup=True)
groups['_meta'] = {'hostvars': hostvars}
return groups
def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
''' Determines if cache file has expired, or if it is still valid '''
if refresh:
return True
if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
mod_time = os.path.getmtime(cache_file)
current_time = time.time()
if (mod_time + cache_expiration_time) > current_time:
return False
return True
def get_cache_settings(cloud=None):
config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
if cloud:
config = cloud_config.OpenStackConfig(
config_files=config_files).get_one(cloud=cloud)
else:
config = cloud_config.OpenStackConfig(
config_files=config_files).get_all()[0]
# For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path()
if cloud:
cache_path = '{0}_{1}'.format(cache_path, cloud)
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
return (cache_file, cache_expiration_time)
def to_json(in_dict):
return json.dumps(in_dict, sort_keys=True, indent=2)
def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'),
                        help='Cloud name (default: None)')
parser.add_argument('--private',
action='store_true',
help='Use private address for ansible host')
parser.add_argument('--refresh', action='store_true',
help='Refresh cached information')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
try:
# openstacksdk library may write to stdout, so redirect this
sys.stdout = StringIO()
config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
sdk.enable_logging(debug=args.debug)
inventory_args = dict(
refresh=args.refresh,
config_files=config_files,
private=args.private,
cloud=args.cloud,
)
if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(
config_key='ansible',
config_defaults={
'use_hostnames': False,
'expand_hostvars': True,
'fail_on_errors': True,
}
))
inventory = sdk_inventory.OpenStackInventory(**inventory_args)
sys.stdout = sys.__stdout__
if args.list:
output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)
except sdk.exceptions.OpenStackCloudException as e:
sys.stderr.write('%s\n' % e.message)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()

@ -1,85 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# openvz.py
#
# Copyright 2014 jordonr <jordon@beamsyn.net>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Inspired by libvirt_lxc.py inventory script
# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
#
# Groups are determined by the description field of openvz guests;
# multiple groups can be separated by commas: webserver,dbserver
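# For example, setting that description on the OpenVZ host (the CTID is
# illustrative):
#   vzctl set 101 --description "webserver,dbserver" --save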
from subprocess import Popen, PIPE
import sys
import json
# List openvz hosts
vzhosts = ['vzhost1', 'vzhost2', 'vzhost3']
# Add openvz hosts to the inventory and add the "_meta" trick
inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
# default group, when description not defined
default_group = ['vzguest']
def get_guests():
# Loop through vzhosts
for h in vzhosts:
# SSH to vzhost and get the list of guests in json
pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True)
# Load Json info of guests
json_data = json.loads(pipe.stdout.read())
# loop through guests
for j in json_data:
# Add information to host vars
inventory['_meta']['hostvars'][j['hostname']] = {
'ctid': j['ctid'],
'veid': j['veid'],
'vpsid': j['vpsid'],
'private_path': j['private'],
'root_path': j['root'],
'ip': j['ip']
}
# determine group from guest description
if j['description'] is not None:
groups = j['description'].split(",")
else:
groups = default_group
# add guest to inventory
for g in groups:
if g not in inventory:
inventory[g] = {'hosts': []}
inventory[g]['hosts'].append(j['hostname'])
return inventory
if len(sys.argv) == 2 and sys.argv[1] == '--list':
inv_json = get_guests()
print(json.dumps(inv_json, sort_keys=True))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({}))
else:
print("Need an argument, either --list or --host <host>")

@ -1,35 +0,0 @@
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Author: Josha Inglis <jinglis@iix.net> based on the gce.ini by Eric Johnson <erjohnso@google.com>
[ovirt]
# For ovirt.py script, which can be used with Python SDK version 3
# Service Account configuration information can be stored in a
# 'secrets.py' file. Ideally, the 'secrets.py' file will already
# exist in your PYTHONPATH and be picked up automatically with an import
# statement in the inventory script. However, you can specify an absolute
# path to the secrets.py file with the 'ovirt_api_secrets' parameter.
ovirt_api_secrets =
# If you are not going to use a 'secrets.py' file, you can set the necessary
# authorization parameters here.
ovirt_url =
ovirt_username =
ovirt_password =
ovirt_ca_file =
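# Example (illustrative values only):
# ovirt_url = https://ovirt.example.com/api
# ovirt_username = admin@internal
# ovirt_password = secret
# ovirt_ca_file = /etc/pki/ovirt-engine/ca.pem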

@ -1,289 +0,0 @@
#!/usr/bin/env python
# Copyright 2015 IIX Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ovirt external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
oVirt via the ovirt-engine-sdk-python library.
When run against a specific host, this script returns the following variables
based on the data obtained from the ovirt_sdk Node object:
- ovirt_uuid
- ovirt_id
- ovirt_image
- ovirt_machine_type
- ovirt_ips
- ovirt_name
- ovirt_description
- ovirt_status
- ovirt_zone
- ovirt_tags
- ovirt_stats
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- running status:
group name prefixed with 'status_' (e.g. status_up, status_down,..)
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a"
Use the ovirt inventory script to print out instance specific information
$ contrib/inventory/ovirt.py --host my_instance
Author: Josha Inglis <jinglis@iix.net> based on the gce.py by Eric Johnson <erjohnso@google.com>
Version: 0.0.1
"""
USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin"
USER_AGENT_VERSION = "v1"
import sys
import os
import argparse
from collections import defaultdict
from ansible.module_utils.six.moves import configparser as ConfigParser
import json
try:
# noinspection PyUnresolvedReferences
from ovirtsdk.api import API
# noinspection PyUnresolvedReferences
from ovirtsdk.xml import params
except ImportError:
print("ovirt inventory script requires ovirt-engine-sdk-python")
sys.exit(1)
class OVirtInventory(object):
def __init__(self):
# Read settings and parse CLI arguments
self.args = self.parse_cli_args()
self.driver = self.get_ovirt_driver()
# Just display data for specific host
if self.args.host:
print(self.json_format_dict(
self.node_to_dict(self.get_instance(self.args.host)),
pretty=self.args.pretty
))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
print(
self.json_format_dict(
data=self.group_instances(),
pretty=self.args.pretty
)
)
sys.exit(0)
@staticmethod
def get_ovirt_driver():
"""
Determine the ovirt authorization settings and return a ovirt_sdk driver.
:rtype : ovirtsdk.api.API
"""
kwargs = {}
ovirt_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "ovirt.ini")
ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = ConfigParser.SafeConfigParser(defaults={
'ovirt_url': '',
'ovirt_username': '',
'ovirt_password': '',
'ovirt_api_secrets': '',
})
if 'ovirt' not in config.sections():
config.add_section('ovirt')
config.read(ovirt_ini_path)
# Attempt to get ovirt params from a configuration file, if one
# exists.
secrets_path = config.get('ovirt', 'ovirt_api_secrets')
secrets_found = False
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
import secrets
kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
secrets_found = True
except ImportError:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py"
print(err)
sys.exit(1)
sys.path.append(os.path.dirname(secrets_path))
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
import secrets
kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
except ImportError:
pass
if not secrets_found:
kwargs = {
'url': config.get('ovirt', 'ovirt_url'),
'username': config.get('ovirt', 'ovirt_username'),
'password': config.get('ovirt', 'ovirt_password'),
}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url'])
kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None)
kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None)
# Retrieve and return the ovirt driver.
return API(insecure=True, **kwargs)
@staticmethod
def parse_cli_args():
"""
Command line argument processing
:rtype : argparse.Namespace
"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)')
return parser.parse_args()
def node_to_dict(self, inst):
"""
:type inst: params.VM
"""
if inst is None:
return {}
inst.get_custom_properties()
ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \
if inst.get_guest_info() is not None else []
stats = {}
for stat in inst.get_statistics().list():
stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum()
return {
'ovirt_uuid': inst.get_id(),
'ovirt_id': inst.get_id(),
'ovirt_image': inst.get_os().get_type(),
'ovirt_machine_type': self.get_machine_type(inst),
'ovirt_ips': ips,
'ovirt_name': inst.get_name(),
'ovirt_description': inst.get_description(),
'ovirt_status': inst.get_status().get_state(),
'ovirt_zone': inst.get_cluster().get_id(),
'ovirt_tags': self.get_tags(inst),
'ovirt_stats': stats,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ips[0] if len(ips) > 0 else None
}
@staticmethod
def get_tags(inst):
"""
:type inst: params.VM
"""
return [x.get_name() for x in inst.get_tags().list()]
def get_machine_type(self, inst):
inst_type = inst.get_instance_type()
if inst_type:
return self.driver.instancetypes.get(id=inst_type.id).name
# noinspection PyBroadException,PyUnusedLocal
def get_instance(self, instance_name):
"""Gets details about a specific instance """
try:
return self.driver.vms.get(name=instance_name)
except Exception as e:
return None
def group_instances(self):
"""Group all instances"""
groups = defaultdict(list)
meta = {"hostvars": {}}
for node in self.driver.vms.list():
assert isinstance(node, params.VM)
name = node.get_name()
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.get_cluster().get_name()
groups[zone].append(name)
tags = self.get_tags(node)
for t in tags:
tag = 'tag_%s' % t
groups[tag].append(name)
nets = [x.get_name() for x in node.get_nics().list()]
for net in nets:
net = 'network_%s' % net
groups[net].append(name)
status = node.get_status().get_state()
stat = 'status_%s' % status.lower()
if stat in groups:
groups[stat].append(name)
else:
groups[stat] = [name]
groups["_meta"] = meta
return groups
@staticmethod
def json_format_dict(data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted
string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
OVirtInventory()

@ -1,257 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
oVirt dynamic inventory script
=================================
Generates a dynamic inventory file for oVirt.
The script will return the following attributes for each virtual machine:
- id
- name
- host
- cluster
- status
- description
- fqdn
- os_type
- template
- tags
- statistics
- devices
When run in --list mode, virtual machines are grouped by the following categories:
- cluster
- tag
- status
Note: If a virtual machine has more than one tag, it will appear in each
of those tag groups.
Examples:
# Execute update of system on webserver virtual machine:
$ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
# Get webserver virtual machine information:
$ contrib/inventory/ovirt4.py --host webserver
Author: Ondra Machacek (@machacekondra)
"""
import argparse
import os
import sys
from collections import defaultdict
from ansible.module_utils.six.moves import configparser
import json
try:
import ovirtsdk4 as sdk
import ovirtsdk4.types as otypes
except ImportError:
print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
sys.exit(1)
def parse_args():
"""
Create command line parser for oVirt dynamic inventory script.
"""
parser = argparse.ArgumentParser(
description='Ansible dynamic inventory script for oVirt.',
)
parser.add_argument(
'--list',
action='store_true',
default=True,
help='Get data of all virtual machines (default: True).',
)
parser.add_argument(
'--host',
help='Get data of virtual machines running on specified host.',
)
parser.add_argument(
'--pretty',
action='store_true',
default=False,
help='Pretty format (default: False).',
)
return parser.parse_args()
def create_connection():
"""
Create a connection to oVirt engine API.
"""
# Get the path of the configuration file, by default use
# 'ovirt.ini' file in script directory:
default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'ovirt.ini',
)
config_path = os.environ.get('OVIRT_INI_PATH', default_path)
# Create parser and add ovirt section if it doesn't exist:
config = configparser.SafeConfigParser(
defaults={
'ovirt_url': os.environ.get('OVIRT_URL'),
'ovirt_username': os.environ.get('OVIRT_USERNAME'),
'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''),
}
)
if not config.has_section('ovirt'):
config.add_section('ovirt')
config.read(config_path)
# Create a connection with options defined in ini file:
return sdk.Connection(
url=config.get('ovirt', 'ovirt_url'),
username=config.get('ovirt', 'ovirt_username'),
password=config.get('ovirt', 'ovirt_password', raw=True),
ca_file=config.get('ovirt', 'ovirt_ca_file') or None,
insecure=not config.get('ovirt', 'ovirt_ca_file'),
)
def get_dict_of_struct(connection, vm):
"""
Transform SDK Vm Struct type to Python dictionary.
"""
if vm is None:
return dict()
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
vm_service = vms_service.vm_service(vm.id)
devices = vm_service.reported_devices_service().list()
tags = vm_service.tags_service().list()
stats = vm_service.statistics_service().list()
labels = vm_service.affinity_labels_service().list()
groups = clusters_service.cluster_service(
vm.cluster.id
).affinity_groups_service().list()
return {
'id': vm.id,
'name': vm.name,
'host': connection.follow_link(vm.host).name if vm.host else None,
'cluster': connection.follow_link(vm.cluster).name,
'status': str(vm.status),
'description': vm.description,
'fqdn': vm.fqdn,
'os_type': vm.os.type,
'template': connection.follow_link(vm.template).name,
'tags': [tag.name for tag in tags],
'affinity_labels': [label.name for label in labels],
'affinity_groups': [
group.name for group in groups
if vm.name in [vm.name for vm in connection.follow_link(group.vms)]
],
'statistics': dict(
(stat.name, stat.values[0].datum) for stat in stats if stat.values
),
'devices': dict(
(device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
),
'ansible_host': next((device.ips[0].address for device in devices if device.ips), None)
}
def get_data(connection, vm_name=None):
"""
Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
"""
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
if vm_name:
vm = vms_service.list(search='name=%s' % vm_name) or [None]
data = get_dict_of_struct(
connection=connection,
vm=vm[0],
)
else:
vms = dict()
data = defaultdict(list)
for vm in vms_service.list():
name = vm.name
vm_service = vms_service.vm_service(vm.id)
cluster_service = clusters_service.cluster_service(vm.cluster.id)
# Add vm to vms dict:
vms[name] = get_dict_of_struct(connection, vm)
# Add vm to cluster group:
cluster_name = connection.follow_link(vm.cluster).name
data['cluster_%s' % cluster_name].append(name)
# Add vm to tag group:
tags_service = vm_service.tags_service()
for tag in tags_service.list():
data['tag_%s' % tag.name].append(name)
# Add vm to status group:
data['status_%s' % vm.status].append(name)
# Add vm to affinity group:
for group in cluster_service.affinity_groups_service().list():
if vm.name in [
v.name for v in connection.follow_link(group.vms)
]:
data['affinity_group_%s' % group.name].append(vm.name)
# Add vm to affinity label group:
affinity_labels_service = vm_service.affinity_labels_service()
for label in affinity_labels_service.list():
data['affinity_label_%s' % label.name].append(name)
data["_meta"] = {
'hostvars': vms,
}
return data
def main():
args = parse_args()
connection = create_connection()
print(
json.dumps(
obj=get_data(
connection=connection,
vm_name=args.host,
),
sort_keys=args.pretty,
indent=args.pretty * 2,
)
)
if __name__ == '__main__':
main()

@ -1,53 +0,0 @@
# Ansible Packet.net external inventory script settings
#
[packet]
# Packet projects to get info for. Set this to 'all' to get info for all
# projects in Packet and merge the results together. Alternatively, set
# this to a comma separated list of projects. E.g. 'project-1,project-3,project-4'
projects = all
projects_exclude =
# By default, packet devices in all states are returned. Specify
# packet device states to return as a comma-separated list.
# device_states = active, inactive, queued, provisioning
# Items per page to retrieve from the Packet API at a time
items_per_page = 999
# API calls to Packet are costly. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-packet.cache
# - ansible-packet.index
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace '-' in tags when creating groups to avoid issues with Ansible
replace_dash_in_groups = True
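# e.g. with this enabled, a device tag 'web-prod' yields the group 'tag_web_prod'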
# The packet inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_device_id = True
group_by_hostname = True
group_by_facility = True
group_by_project = True
group_by_operating_system = True
group_by_plan_type = True
group_by_tags = True
group_by_tag_none = True
# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*
# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*

@ -1,506 +0,0 @@
#!/usr/bin/env python
'''
Packet.net external inventory script
====================================
Generates inventory that Ansible can understand by making API requests to
Packet.net using the Packet library.
NOTE: This script assumes Ansible is being executed where the environment
variable holding the Packet API token has already been set:
export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
This script also assumes there is a packet_net.ini file alongside it. To specify a
different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable:
export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini
'''
# (c) 2016, Peter Sankauskas
# (c) 2017, Tomas Karasek
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
from ansible.module_utils import six
from ansible.module_utils.six.moves import configparser
try:
import packet
except ImportError as e:
sys.exit("failed=True msg='`packet-python` library required for this script'")
import traceback
import json
ini_section = 'packet'
class PacketInventory(object):
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
''' Main execution path '''
        # Inventory grouped by device IDs, hostnames, facilities, projects,
        # operating systems, plans, and tags
self.inventory = self._empty_inventory()
# Index of hostname (address) to device ID
self.index = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of devices for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the packet_net.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
_ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')
if _ini_path_raw:
packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw))
else:
packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
config.read(packet_ini_path)
# items per page
self.items_per_page = 999
        if config.has_option(ini_section, 'items_per_page'):
            self.items_per_page = config.getint(ini_section, 'items_per_page')
# Instance states to be gathered in inventory. Default is all of them.
packet_valid_device_states = [
'active',
'inactive',
'queued',
'provisioning'
]
self.packet_device_states = []
if config.has_option(ini_section, 'device_states'):
for device_state in config.get(ini_section, 'device_states').split(','):
device_state = device_state.strip()
if device_state not in packet_valid_device_states:
continue
self.packet_device_states.append(device_state)
else:
self.packet_device_states = packet_valid_device_states
# Cache related
cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-packet.cache"
self.cache_path_index = cache_dir + "/ansible-packet.index"
self.cache_max_age = config.getint(ini_section, 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option(ini_section, 'nested_groups'):
self.nested_groups = config.getboolean(ini_section, 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option(ini_section, 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_device_id',
'group_by_hostname',
'group_by_facility',
'group_by_project',
'group_by_operating_system',
'group_by_plan_type',
'group_by_tags',
'group_by_tag_none',
]
for option in group_by_options:
if config.has_option(ini_section, option):
setattr(self, option, config.getboolean(ini_section, option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get(ini_section, 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get(ini_section, 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Projects
self.projects = []
configProjects = config.get(ini_section, 'projects')
        # Parse the exclusion option into a list of names so membership
        # checks are exact rather than substring matches
        configProjects_exclude = [
            p.strip() for p in config.get(ini_section, 'projects_exclude').split(',') if p.strip()
        ]
        if (configProjects == 'all'):
            for projectInfo in self.get_projects():
                if projectInfo.name not in configProjects_exclude:
                    self.projects.append(projectInfo.name)
else:
self.projects = configProjects.split(",")
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet')
parser.add_argument('--list', action='store_true', default=True,
help='List Devices (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific device')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
for projectInfo in self.get_projects():
if projectInfo.name in self.projects:
self.get_devices_by_project(projectInfo)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self):
''' create connection to api server'''
token = os.environ.get('PACKET_API_TOKEN')
if token is None:
raise Exception("Error reading token from environment (PACKET_API_TOKEN)!")
manager = packet.Manager(auth_token=token)
return manager
def get_projects(self):
'''Makes a Packet API call to get the list of projects'''
params = {
'per_page': self.items_per_page
}
try:
manager = self.connect()
projects = manager.list_projects(params=params)
return projects
except Exception as e:
traceback.print_exc()
self.fail_with_error(e, 'getting Packet projects')
def get_devices_by_project(self, project):
        ''' Makes a Packet API call to list the devices in a particular
            project '''
params = {
'per_page': self.items_per_page
}
try:
manager = self.connect()
devices = manager.list_devices(project_id=project.id, params=params)
for device in devices:
self.add_device(device, project)
except Exception as e:
traceback.print_exc()
self.fail_with_error(e, 'getting Packet devices')
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_device(self, device_id):
manager = self.connect()
device = manager.get_device(device_id)
return device
def add_device(self, device, project):
''' Adds a device to the inventory and index, as long as it is
addressable '''
# Only return devices with desired device states
if device.state not in self.packet_device_states:
return
# Select the best destination address. Only include management
# addresses as non-management (elastic) addresses need manual
# host configuration to be routable.
# See https://help.packet.net/article/54-elastic-ips.
dest = None
for ip_address in device.ip_addresses:
if ip_address['public'] is True and \
ip_address['address_family'] == 4 and \
ip_address['management'] is True:
dest = ip_address['address']
if not dest:
# Skip devices we cannot address (e.g. private VPC subnet)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(device.hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(device.hostname):
return
# Add to index
self.index[dest] = [project.id, device.id]
# Inventory: Group by device ID (always a group of 1)
if self.group_by_device_id:
self.inventory[device.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'devices', device.id)
# Inventory: Group by device name (hopefully a group of 1)
if self.group_by_hostname:
self.push(self.inventory, device.hostname, dest)
if self.nested_groups:
                self.push_group(self.inventory, 'hostnames', device.hostname)
# Inventory: Group by project
if self.group_by_project:
self.push(self.inventory, project.name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'projects', project.name)
# Inventory: Group by facility
if self.group_by_facility:
self.push(self.inventory, device.facility['code'], dest)
            if self.nested_groups:
                self.push_group(self.inventory, project.name, device.facility['code'])
# Inventory: Group by OS
if self.group_by_operating_system:
self.push(self.inventory, device.operating_system['slug'], dest)
if self.nested_groups:
self.push_group(self.inventory, 'operating_systems', device.operating_system['slug'])
# Inventory: Group by plan type
if self.group_by_plan_type:
self.push(self.inventory, device.plan['slug'], dest)
if self.nested_groups:
self.push_group(self.inventory, 'plans', device.plan['slug'])
# Inventory: Group by tag keys
if self.group_by_tags:
for k in device.tags:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
# Global Tag: devices without tags
if self.group_by_tag_none and len(device.tags) == 0:
self.push(self.inventory, 'tag_none', dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all Packet devices
self.push(self.inventory, 'packet', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device)
def get_host_info_dict_from_device(self, device):
device_vars = {}
for key in vars(device):
value = getattr(device, key)
key = self.to_safe('packet_' + key)
# Handle complex types
if key == 'packet_state':
device_vars[key] = device.state or ''
elif key == 'packet_hostname':
device_vars[key] = value
elif isinstance(value, (int, bool)):
device_vars[key] = value
elif isinstance(value, six.string_types):
device_vars[key] = value.strip()
elif value is None:
device_vars[key] = ''
elif key == 'packet_facility':
device_vars[key] = value['code']
elif key == 'packet_operating_system':
device_vars[key] = value['slug']
elif key == 'packet_plan':
device_vars[key] = value['slug']
elif key == 'packet_tags':
for k in value:
key = self.to_safe('packet_tag_' + k)
device_vars[key] = k
else:
pass
# print key
# print type(value)
# print value
return device_vars
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(project_id, device_id) = self.index[self.args.host]
device = self.get_device(device_id)
return self.json_format_dict(self.get_host_info_dict_from_device(device), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
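    # Illustrative shape of what push()/push_group() build when nested_groups
    # is enabled (a sketch; names are placeholders, not real output):
    #   {"devices": {"children": ["<device-id>"]},
    #    "<device-id>": ["<ip-address>"]}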
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
        with open(self.cache_path_cache, 'r') as cache:
            json_inventory = cache.read()
        return json_inventory
def load_index_from_cache(self):
        ''' Reads the index from the cache file and sets self.index '''
        with open(self.cache_path_index, 'r') as cache:
            self.index = json.loads(cache.read())
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += r"\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
PacketInventory()

@ -1,248 +0,0 @@
#!/usr/bin/env python
# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE <gauthierl@lapth.cnrs.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Updated 2016 by Matt Harris <matthaeus.harris@gmail.com>
#
# Added support for Proxmox VE 4.x
# Added support for using the Notes field of a VM to define groups and variables:
# A well-formatted JSON object in the Notes field will be added to the _meta
# section for that VM. In addition, the "groups" key of this JSON object may be
# used to specify group membership:
#
# { "groups": ["utility", "databases"], "a": false, "b": true }
import json
import os
import sys
from optparse import OptionParser
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import open_url
class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
class ProxmoxVM(dict):
def get_variables(self):
variables = {}
for key, value in iteritems(self):
variables['proxmox_' + key] = value
return variables
class ProxmoxVMList(list):
def __init__(self, data=None):
data = [] if data is None else data
for item in data:
self.append(ProxmoxVM(item))
def get_names(self):
return [vm['name'] for vm in self if vm['template'] != 1]
def get_by_name(self, name):
results = [vm for vm in self if vm['name'] == name]
return results[0] if len(results) > 0 else None
def get_variables(self):
variables = {}
for vm in self:
variables[vm['name']] = vm.get_variables()
return variables
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
class ProxmoxPool(dict):
def get_members_name(self):
return [member['name'] for member in self['members'] if member['template'] != 1]
class ProxmoxAPI(object):
def __init__(self, options):
self.options = options
self.credentials = None
if not options.url:
raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
elif not options.username:
raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
elif not options.password:
raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
def auth(self):
request_path = '{0}api2/json/access/ticket'.format(self.options.url)
request_params = urlencode({
'username': self.options.username,
'password': self.options.password,
})
data = json.load(open_url(request_path, data=request_params))
self.credentials = {
'ticket': data['data']['ticket'],
'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
}
def get(self, url, data=None):
request_path = '{0}{1}'.format(self.options.url, url)
headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])}
request = open_url(request_path, data=data, headers=headers)
response = json.load(request)
return response['data']
def nodes(self):
return ProxmoxNodeList(self.get('api2/json/nodes'))
def vms_by_type(self, node, type):
return ProxmoxVMList(self.get('api2/json/nodes/{0}/{1}'.format(node, type)))
def vm_description_by_type(self, node, vm, type):
return self.get('api2/json/nodes/{0}/{1}/{2}/config'.format(node, type, vm))
def node_qemu(self, node):
return self.vms_by_type(node, 'qemu')
def node_qemu_description(self, node, vm):
return self.vm_description_by_type(node, vm, 'qemu')
def node_lxc(self, node):
return self.vms_by_type(node, 'lxc')
def node_lxc_description(self, node, vm):
return self.vm_description_by_type(node, vm, 'lxc')
def pools(self):
return ProxmoxPoolList(self.get('api2/json/pools'))
def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{0}'.format(poolid)))
def main_list(options):
results = {
'all': {
'hosts': [],
},
'_meta': {
'hostvars': {},
}
}
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
results['all']['hosts'] += qemu_list.get_names()
results['_meta']['hostvars'].update(qemu_list.get_variables())
lxc_list = proxmox_api.node_lxc(node)
results['all']['hosts'] += lxc_list.get_names()
results['_meta']['hostvars'].update(lxc_list.get_variables())
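    # NOTE: the description lookups below reuse the last 'node' from the loop
    # above; on a multi-node cluster the config query may target the wrong node.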
for vm in results['_meta']['hostvars']:
vmid = results['_meta']['hostvars'][vm]['proxmox_vmid']
try:
type = results['_meta']['hostvars'][vm]['proxmox_type']
except KeyError:
type = 'qemu'
try:
description = proxmox_api.vm_description_by_type(node, vmid, type)['description']
except KeyError:
description = None
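        # A JSON object in the Notes field becomes extra hostvars; plain text
        # is kept under 'notes'; a missing description (None) adds nothing.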
try:
metadata = json.loads(description)
except TypeError:
metadata = {}
except ValueError:
metadata = {
'notes': description
}
if 'groups' in metadata:
# print metadata
for group in metadata['groups']:
if group not in results:
results[group] = {
'hosts': []
}
results[group]['hosts'] += [vm]
results['_meta']['hostvars'][vm].update(metadata)
# pools
for pool in proxmox_api.pools().get_names():
results[pool] = {
'hosts': proxmox_api.pool(pool).get_members_name(),
}
return results
def main_host(options):
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
qemu = qemu_list.get_by_name(options.host)
if qemu:
return qemu.get_variables()
return {}
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
parser.add_option('--list', action="store_true", default=False, dest="list")
parser.add_option('--host', dest="host")
parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
parser.add_option('--pretty', action="store_true", default=False, dest='pretty')
(options, args) = parser.parse_args()
if options.list:
data = main_list(options)
elif options.host:
data = main_host(options)
else:
parser.print_help()
sys.exit(1)
indent = None
if options.pretty:
indent = 2
print(json.dumps(data, indent=indent))
if __name__ == '__main__':
main()

@ -1,96 +0,0 @@
#!/usr/bin/env python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
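# Example invocations (a sketch; the node id is a placeholder. RACKHD_URL
# takes host:port; the http:// prefix is added by the script below, and the
# URL defaults to localhost:8080 when unset):
#   RACKHD_URL=10.1.1.45:8080 ./rackhd.py --list
#   RACKHD_URL=10.1.1.45:8080 ./rackhd.py --host <nodeid>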
import json
import os
import requests
import argparse
RACKHD_URL = 'http://localhost:8080'
class RackhdInventory(object):
def __init__(self, nodeids):
self._inventory = {}
for nodeid in nodeids:
self._load_inventory_data(nodeid)
inventory = {}
for (nodeid, info) in self._inventory.items():
inventory[nodeid] = (self._format_output(nodeid, info))
print(json.dumps(inventory))
def _load_inventory_data(self, nodeid):
info = {}
info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid)
info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)
results = {}
for (key, url) in info.items():
r = requests.get(url, verify=False)
results[key] = r.text
self._inventory[nodeid] = results
    def _format_output(self, nodeid, info):
        # initialize first so the KeyError handler below cannot leave 'output' unbound
        output = {}
        try:
node_info = json.loads(info['lookup'])
ipaddress = ''
if len(node_info) > 0:
ipaddress = node_info[0]['ipAddress']
output = {'hosts': [ipaddress], 'vars': {}}
for (key, result) in info.items():
output['vars'][key] = json.loads(result)
output['vars']['ansible_ssh_user'] = 'monorail'
except KeyError:
pass
return output
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
return parser.parse_args()
try:
# check if rackhd url(ie:10.1.1.45:8080) is specified in the environment
RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
except Exception:
# use default values
pass
# Use the nodeid specified in the environment to limit the data returned
# or return data for all available nodes
nodeids = []
if (parse_args().host):
try:
nodeids += parse_args().host.split(',')
RackhdInventory(nodeids)
except Exception:
pass
if (parse_args().list):
try:
url = RACKHD_URL + '/api/common/nodes'
r = requests.get(url, verify=False)
data = json.loads(r.text)
for entry in data:
if entry['type'] == 'compute':
nodeids.append(entry['id'])
RackhdInventory(nodeids)
except Exception:
pass

@ -1,66 +0,0 @@
# Ansible Rackspace external inventory script settings
#
[rax]
# Environment Variable: RAX_CREDS_FILE
#
# An optional configuration that points to a pyrax-compatible credentials
# file.
#
# If not supplied, rax.py will look for a credentials file
# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
# and therefore requires a file formatted per the SDK's specifications.
#
# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
# creds_file = ~/.rackspace_cloud_credentials
# Environment Variable: RAX_REGION
#
# An optional environment variable to narrow inventory search
# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
# datacenter) and optionally accepts a comma-separated list.
# regions = IAD,ORD,DFW
# Environment Variable: RAX_ENV
#
# A configuration that will use an environment as configured in
# ~/.pyrax.cfg, see
# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
# env = prod
# Environment Variable: RAX_META_PREFIX
# Default: meta
#
# A configuration that changes the prefix used for meta key/value groups.
# For compatibility with ec2.py set to "tag"
# meta_prefix = meta
# Environment Variable: RAX_ACCESS_NETWORK
# Default: public
#
# A configuration that will tell the inventory script to use a specific
# server network to determine the ansible_ssh_host value. If no address
# is found, ansible_ssh_host will not be set. Accepts a comma-separated
# list of network names, the first found wins.
# access_network = public
# Environment Variable: RAX_ACCESS_IP_VERSION
# Default: 4
#
# A configuration related to "access_network" that will attempt to
# determine the ansible_ssh_host value for either IPv4 or IPv6. If no
# address is found, ansible_ssh_host will not be set.
# Acceptable values are: 4 or 6. Values other than 4 or 6
# will be ignored, and 4 will be used. Accepts a comma-separated list,
# the first found wins.
# access_ip_version = 4
# Environment Variable: RAX_CACHE_MAX_AGE
# Default: 600
#
# A configuration that changes the behavior of the inventory cache.
# Inventory listings requested within this many seconds of the previous
# full request will be returned from the cache instead of making a full
# request for all inventory. Setting this value to 0 will force a full request.
# cache_max_age = 600

@ -1,470 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Jesse Keating <jesse.keating@rackspace.com,
# Paul Durivage <paul.durivage@rackspace.com>,
# Matt Martz <matt@sivel.net>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Rackspace Cloud Inventory
Authors:
Jesse Keating <jesse.keating@rackspace.com,
Paul Durivage <paul.durivage@rackspace.com>,
Matt Martz <matt@sivel.net>
Description:
Generates inventory that Ansible can understand by making API requests to
Rackspace Public Cloud API
When run against a specific host, this script returns variables similar to:
rax_os-ext-sts_task_state
rax_addresses
rax_links
rax_image
rax_os-ext-sts_vm_state
rax_flavor
rax_id
rax_rax-bandwidth_bandwidth
rax_user_id
rax_os-dcf_diskconfig
rax_accessipv4
rax_accessipv6
rax_progress
rax_os-ext-sts_power_state
rax_metadata
rax_status
rax_updated
rax_hostid
rax_name
rax_created
rax_tenant_id
rax_loaded
Configuration:
rax.py can be configured using a rax.ini file or via environment
variables. The rax.ini file should live in the same directory alongside
this script.
The section header for configuration values related to this
inventory plugin is [rax]
[rax]
creds_file = ~/.rackspace_cloud_credentials
regions = IAD,ORD,DFW
env = prod
meta_prefix = meta
access_network = public
access_ip_version = 4
Each of these configurations also has a corresponding environment variable.
An environment variable will override a configuration file value.
creds_file:
Environment Variable: RAX_CREDS_FILE
An optional configuration that points to a pyrax-compatible credentials
file.
If not supplied, rax.py will look for a credentials file
at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
and therefore requires a file formatted per the SDK's specifications.
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
regions:
Environment Variable: RAX_REGION
An optional environment variable to narrow inventory search
scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
datacenter) and optionally accepts a comma-separated list.
environment:
Environment Variable: RAX_ENV
A configuration that will use an environment as configured in
~/.pyrax.cfg, see
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
meta_prefix:
Environment Variable: RAX_META_PREFIX
Default: meta
A configuration that changes the prefix used for meta key/value groups.
For compatibility with ec2.py set to "tag"
access_network:
Environment Variable: RAX_ACCESS_NETWORK
Default: public
A configuration that will tell the inventory script to use a specific
server network to determine the ansible_ssh_host value. If no address
is found, ansible_ssh_host will not be set. Accepts a comma-separated
list of network names, the first found wins.
access_ip_version:
Environment Variable: RAX_ACCESS_IP_VERSION
Default: 4
A configuration related to "access_network" that will attempt to
determine the ansible_ssh_host value for either IPv4 or IPv6. If no
address is found, ansible_ssh_host will not be set.
Acceptable values are: 4 or 6. Values other than 4 or 6
will be ignored, and 4 will be used. Accepts a comma-separated list,
the first found wins.
Examples:
List server instances
$ RAX_CREDS_FILE=~/.raxpub rax.py --list
List servers in ORD datacenter only
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
List servers in ORD and DFW datacenters
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
Get server details for server named "server.example.com"
$ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
Use the instance private IP to connect (instead of public IP)
$ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list
"""
import os
import re
import sys
import argparse
import warnings
import collections
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
import json
try:
import pyrax
from pyrax.utils import slugify
except ImportError:
sys.exit('pyrax is required for this module')
from time import time
from ansible.constants import get_config
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import text_type
NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None))
def load_config_file():
p = ConfigParser.ConfigParser()
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'rax.ini')
try:
p.read(config_file)
except ConfigParser.Error:
return None
else:
return p
def rax_slugify(value):
return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def to_dict(obj):
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
key = rax_slugify(key)
instance[key] = value
return instance
def host(regions, hostname):
hostvars = {}
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
for server in cs.servers.list():
if server.name == hostname:
for key, value in to_dict(server).items():
hostvars[key] = value
# And finally, add an IP address
hostvars['ansible_ssh_host'] = server.accessIPv4
print(json.dumps(hostvars, sort_keys=True, indent=4))
def _list_into_cache(regions):
groups = collections.defaultdict(list)
hostvars = collections.defaultdict(dict)
images = {}
cbs_attachments = collections.defaultdict(dict)
prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')
try:
# Ansible 2.3+
networks = get_config(p, 'rax', 'access_network',
'RAX_ACCESS_NETWORK', 'public', value_type='list')
except TypeError:
# Ansible 2.2.x and below
# pylint: disable=unexpected-keyword-arg
networks = get_config(p, 'rax', 'access_network',
'RAX_ACCESS_NETWORK', 'public', islist=True)
try:
try:
# Ansible 2.3+
ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
except TypeError:
# Ansible 2.2.x and below
# pylint: disable=unexpected-keyword-arg
ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
'RAX_ACCESS_IP_VERSION', 4, islist=True))
except Exception:
ip_versions = [4]
else:
ip_versions = [v for v in ip_versions if v in [4, 6]]
if not ip_versions:
ip_versions = [4]
# Go through all the regions looking for servers
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
if cs is None:
warnings.warn(
'Connecting to Rackspace region "%s" has caused Pyrax to '
'return None. Is this a valid region?' % region,
RuntimeWarning)
continue
for server in cs.servers.list():
# Create a group on region
groups[region].append(server.name)
# Check if group metadata key in servers' metadata
group = server.metadata.get('group')
if group:
groups[group].append(server.name)
for extra_group in server.metadata.get('groups', '').split(','):
if extra_group:
groups[extra_group].append(server.name)
# Add host metadata
for key, value in to_dict(server).items():
hostvars[server.name][key] = value
hostvars[server.name]['rax_region'] = region
for key, value in iteritems(server.metadata):
groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
groups['instance-%s' % server.id].append(server.name)
groups['flavor-%s' % server.flavor['id']].append(server.name)
# Handle boot from volume
if not server.image:
if not cbs_attachments[region]:
cbs = pyrax.connect_to_cloud_blockstorage(region)
for vol in cbs.list():
if boolean(vol.bootable, strict=False):
for attachment in vol.attachments:
metadata = vol.volume_image_metadata
server_id = attachment['server_id']
cbs_attachments[region][server_id] = {
'id': metadata['image_id'],
'name': slugify(metadata['image_name'])
}
image = cbs_attachments[region].get(server.id)
if image:
server.image = {'id': image['id']}
hostvars[server.name]['rax_image'] = server.image
hostvars[server.name]['rax_boot_source'] = 'volume'
images[image['id']] = image['name']
else:
hostvars[server.name]['rax_boot_source'] = 'local'
try:
imagegroup = 'image-%s' % images[server.image['id']]
groups[imagegroup].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
except KeyError:
try:
image = cs.images.get(server.image['id'])
except cs.exceptions.NotFound:
groups['image-%s' % server.image['id']].append(server.name)
else:
images[image.id] = image.human_id
groups['image-%s' % image.human_id].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
# And finally, add an IP address
ansible_ssh_host = None
# use accessIPv[46] instead of looping address for 'public'
for network_name in networks:
if ansible_ssh_host:
break
if network_name == 'public':
for version_name in ip_versions:
if ansible_ssh_host:
break
if version_name == 6 and server.accessIPv6:
ansible_ssh_host = server.accessIPv6
elif server.accessIPv4:
ansible_ssh_host = server.accessIPv4
if not ansible_ssh_host:
addresses = server.addresses.get(network_name, [])
for address in addresses:
for version_name in ip_versions:
if ansible_ssh_host:
break
if address.get('version') == version_name:
ansible_ssh_host = address.get('addr')
break
if ansible_ssh_host:
hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host
if hostvars:
groups['_meta'] = {'hostvars': hostvars}
with open(get_cache_file_path(regions), 'w') as cache_file:
json.dump(groups, cache_file)
def get_cache_file_path(regions):
regions_str = '.'.join([reg.strip().lower() for reg in regions])
ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp')
if not os.path.exists(ansible_tmp_path):
os.makedirs(ansible_tmp_path)
return os.path.join(ansible_tmp_path,
'ansible-rax-%s-%s.cache' % (
pyrax.identity.username, regions_str))
def _list(regions, refresh_cache=True):
cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
'RAX_CACHE_MAX_AGE', 600))
if (not os.path.exists(get_cache_file_path(regions)) or
refresh_cache or
(time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age):
# Cache file doesn't exist or older than 10m or refresh cache requested
_list_into_cache(regions)
with open(get_cache_file_path(regions), 'r') as cache_file:
groups = json.load(cache_file)
print(json.dumps(groups, sort_keys=True, indent=4))
def parse_args():
parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
'inventory module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
    parser.add_argument('--refresh-cache', action='store_true', default=False,
                        help=('Force refresh of cache, making API requests to '
                              'Rackspace (default: False - use cache files)'))
return parser.parse_args()
def setup():
default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')
env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
if env:
pyrax.set_environment(env)
keyring_username = pyrax.get_setting('keyring_username')
# Attempt to grab credentials from environment first
creds_file = get_config(p, 'rax', 'creds_file',
'RAX_CREDS_FILE', None)
if creds_file is not None:
creds_file = os.path.expanduser(creds_file)
else:
# But if that fails, use the default location of
# ~/.rackspace_cloud_credentials
if os.path.isfile(default_creds_file):
creds_file = default_creds_file
elif not keyring_username:
sys.exit('No value in environment variable %s and/or no '
'credentials file at %s'
% ('RAX_CREDS_FILE', default_creds_file))
identity_type = pyrax.get_setting('identity_type')
pyrax.set_setting('identity_type', identity_type or 'rackspace')
region = pyrax.get_setting('region')
try:
if keyring_username:
pyrax.keyring_auth(keyring_username, region=region)
else:
pyrax.set_credential_file(creds_file, region=region)
    except Exception as e:
        # str(e) is portable; e.message does not exist on Python 3
        sys.exit(str(e))
regions = []
if region:
regions.append(region)
else:
try:
# Ansible 2.3+
region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
value_type='list')
except TypeError:
# Ansible 2.2.x and below
# pylint: disable=unexpected-keyword-arg
region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
islist=True)
for region in region_list:
region = region.strip().upper()
if region == 'ALL':
regions = pyrax.regions
break
elif region not in pyrax.regions:
sys.exit('Unsupported region %s' % region)
elif region not in regions:
regions.append(region)
return regions
def main():
args = parse_args()
regions = setup()
if args.list:
_list(regions, refresh_cache=args.refresh_cache)
elif args.host:
host(regions, args.host)
sys.exit(0)
p = load_config_file()
if __name__ == '__main__':
main()

@ -1,35 +0,0 @@
# Rudder external inventory script settings
#
[rudder]
# Your Rudder server API URL, typically:
# https://rudder.local/rudder/api
uri = https://rudder.local/rudder/api
# By default, Rudder uses a self-signed certificate. Set this to True
# to disable certificate validation.
disable_ssl_certificate_validation = True
# Your Rudder API token, created in the Web interface.
token = aaabbbccc
# Rudder API version to use, use "latest" for latest available
# version.
version = latest
# Property to use as group name in the output.
# Can generally be "id" or "displayName".
group_name = displayName
# Fail if there are two groups with the same name or two hosts with the
# same hostname in the output.
fail_if_name_collision = True
# We cache the results of Rudder API in a local file
cache_path = /tmp/ansible-rudder.cache
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# Set to 0 to disable cache.
cache_max_age = 500

@ -1,296 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2015, Normation SAS
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Rudder external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
a Rudder server. This script is compatible with Rudder 2.10 or later.
The output JSON includes all your Rudder groups, containing the hostnames of
their nodes. Groups and nodes have a variable called rudder_group_id and
rudder_node_id respectively, which is the Rudder internal id of the item and
identifies it uniquely. Host variables also include your node properties,
which are key => value properties set by the API and specific to each node.
This script assumes there is a rudder.ini file alongside it. To specify a
different path to rudder.ini, define the RUDDER_INI_PATH environment variable:
export RUDDER_INI_PATH=/path/to/my_rudder.ini
You have to configure your Rudder server information, either in rudder.ini or
by overriding it with environment variables:
export RUDDER_API_VERSION='latest'
export RUDDER_API_TOKEN='my_token'
export RUDDER_API_URI='https://rudder.local/rudder/api'
'''
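# Example invocations (a sketch; flags are defined in parse_cli_args below,
# and --list is already the default):
#   rudder.py --list
#   rudder.py --host <hostname>
#   rudder.py --refresh-cache --list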
import sys
import os
import re
import argparse
import httplib2 as http
from time import time
from ansible.module_utils import six
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six.moves.urllib.parse import urlparse
import json
class RudderInventory(object):
def __init__(self):
''' Main execution path '''
# Empty inventory by default
self.inventory = {}
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Create connection
self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation)
# Cache
if self.args.refresh_cache:
self.update_cache()
elif not self.is_cache_valid():
self.update_cache()
else:
self.load_cache()
data_to_print = {}
if self.args.host:
data_to_print = self.get_host_info(self.args.host)
elif self.args.list:
data_to_print = self.get_list_info()
print(self.json_format_dict(data_to_print, True))
def read_settings(self):
''' Reads the settings from the rudder.ini file '''
if six.PY2:
config = configparser.SafeConfigParser()
else:
config = configparser.ConfigParser()
rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini')
rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path)))
config.read(rudder_ini_path)
self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token'))
self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version'))
self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri'))
self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation')
self.group_name = config.get('rudder', 'group_name')
self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision')
self.cache_path = config.get('rudder', 'cache_path')
self.cache_max_age = config.getint('rudder', 'cache_max_age')
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)')
self.args = parser.parse_args()
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path):
mod_time = os.path.getmtime(self.cache_path)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
return True
return False
    def load_cache(self):
        ''' Reads the cache from the cache file and sets self.inventory '''
        with open(self.cache_path, 'r') as cache:
            json_cache = cache.read()
        try:
            self.inventory = json.loads(json_cache)
        except ValueError:
            self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache')
def write_cache(self):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(self.inventory, True)
        with open(self.cache_path, 'w') as cache:
            cache.write(json_data)
def get_nodes(self):
''' Gets the nodes list from Rudder '''
path = '/nodes?select=nodeAndPolicyServer'
result = self.api_call(path)
nodes = {}
for node in result['data']['nodes']:
nodes[node['id']] = {}
nodes[node['id']]['hostname'] = node['hostname']
if 'properties' in node:
nodes[node['id']]['properties'] = node['properties']
else:
nodes[node['id']]['properties'] = []
return nodes
def get_groups(self):
''' Gets the groups list from Rudder '''
path = '/groups'
result = self.api_call(path)
groups = {}
for group in result['data']['groups']:
groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])}
return groups
def update_cache(self):
''' Fetches the inventory information from Rudder and creates the inventory '''
nodes = self.get_nodes()
groups = self.get_groups()
inventory = {}
for group in groups:
# Check for name collision
if self.fail_if_name_collision:
if groups[group]['name'] in inventory:
self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups')
# Add group to inventory
inventory[groups[group]['name']] = {}
inventory[groups[group]['name']]['hosts'] = []
inventory[groups[group]['name']]['vars'] = {}
inventory[groups[group]['name']]['vars']['rudder_group_id'] = group
for node in groups[group]['hosts']:
# Add node to group
inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname'])
properties = {}
for node in nodes:
# Check for name collision
if self.fail_if_name_collision:
if nodes[node]['hostname'] in properties:
self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts')
# Add node properties to inventory
properties[nodes[node]['hostname']] = {}
properties[nodes[node]['hostname']]['rudder_node_id'] = node
for node_property in nodes[node]['properties']:
properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value']
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = properties
self.inventory = inventory
if self.cache_max_age > 0:
self.write_cache()
def get_list_info(self):
''' Gets inventory information from local cache '''
return self.inventory
def get_host_info(self, hostname):
''' Gets information about a specific host from local cache '''
if hostname in self.inventory['_meta']['hostvars']:
return self.inventory['_meta']['hostvars'][hostname]
else:
return {}
def api_call(self, path):
''' Performs an API request '''
headers = {
'X-API-Token': self.token,
'X-API-Version': self.version,
'Content-Type': 'application/json;charset=utf-8'
}
target = urlparse(self.uri + path)
method = 'GET'
body = ''
try:
response, content = self.conn.request(target.geturl(), method, body, headers)
except Exception:
self.fail_with_error('Error connecting to Rudder server')
try:
data = json.loads(content)
        except ValueError:
self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response')
return data
def fail_with_error(self, err_msg, err_operation=None):
''' Logs an error to std err for ansible-playbook to consume and exit '''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be
used as Ansible variable names '''
return re.sub(r'[^A-Za-z0-9\_]', '_', word)
# Run the script
RudderInventory()

@ -1,37 +0,0 @@
# Ansible dynamic inventory script for Scaleway cloud provider
#
[compute]
# Fetch inventory for regions. If not defined will read the SCALEWAY_REGION environment variable
#
# regions = all
# regions = ams1
# regions = par1, ams1
regions = par1
# Define a Scaleway token to perform required queries on the API
# in order to generate inventory output.
#
[auth]
# Token to authenticate with Scaleway's API.
# If not defined will read the SCALEWAY_TOKEN environment variable
#
api_token = mysecrettoken
# To avoid performing excessive calls to Scaleway API you can define a
# cache for the plugin output. Within the time defined in seconds, latest
# output will be reused. After that time, the cache will be refreshed.
#
[cache]
cache_max_age = 60
cache_dir = ~/.ansible/tmp
[defaults]
# You may want to use only public IP addresses or private IP addresses.
# You can set public_ip_only configuration to get public IPs only.
# If not defined defaults to retrieving private IP addresses.
#
public_ip_only = false

@ -1,230 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Scaleway
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to Scaleway API
Requires the 'requests' Python library; ensure it is installed before using this script (pip install requests, see https://pypi.org/project/requests/).
Before using this script you may want to modify scaleway.ini config file.
This script generates an Ansible hosts file with these host groups:
<hostname>: Defines host itself with Scaleway's hostname as group name.
<tag>: Contains all hosts which has "<tag>" as tag.
<region>: Contains all hosts which are in the "<region>" region.
all: Contains all hosts defined in Scaleway.
'''
# (c) 2017, Paul B. <paul@bonaud.fr>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
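# Usage sketch: the script takes no command-line options; configuration comes
# from a scaleway.ini next to the script and can be overridden with the
# SCALEWAY_TOKEN and SCALEWAY_REGION environment variables (values below are
# placeholders):
#   SCALEWAY_TOKEN=mysecrettoken SCALEWAY_REGION=par1 ./scaleway.py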
import copy
import os
import requests
from ansible.module_utils import six
from ansible.module_utils.six.moves import configparser
import sys
import time
import traceback
import json
EMPTY_GROUP = {
'children': [],
'hosts': []
}
class ScalewayAPI:
REGIONS = ['par1', 'ams1']
def __init__(self, auth_token, region):
self.session = requests.session()
self.session.headers.update({
'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0])
})
self.session.headers.update({
'X-Auth-Token': auth_token.encode('latin1')
})
self.base_url = 'https://cp-%s.scaleway.com' % (region)
def servers(self):
raw = self.session.get('/'.join([self.base_url, 'servers']))
try:
response = raw.json()
return self.get_resource('servers', response, raw)
except ValueError:
return []
def get_resource(self, resource, response, raw):
raw.raise_for_status()
if resource in response:
return response[resource]
else:
raise ValueError(
"Resource %s not found in Scaleway API response" % (resource))
def env_or_param(env_key, param=None, fallback=None):
env_value = os.environ.get(env_key)
if (param, env_value) == (None, None):
return fallback
elif env_value is not None:
return env_value
else:
return param
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache', 'cache_dir')
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache', 'cache_dir'):
dpath = config.get('cache', 'cache_dir')
try:
existing = os.stat(
'/'.join([dpath, 'scaleway_ansible_inventory.json']))
except OSError:
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
else:
maxage = 60
if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
return True
return False
def generate_inv_from_api(config):
try:
inventory['scaleway'] = copy.deepcopy(EMPTY_GROUP)
auth_token = None
if config.has_option('auth', 'api_token'):
auth_token = config.get('auth', 'api_token')
auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token)
if auth_token is None:
sys.stderr.write('ERROR: missing authentication token for Scaleway API')
sys.exit(1)
if config.has_option('compute', 'regions'):
regions = config.get('compute', 'regions')
if regions == 'all':
regions = ScalewayAPI.REGIONS
else:
regions = map(str.strip, regions.split(','))
else:
regions = [
env_or_param('SCALEWAY_REGION', fallback='par1')
]
for region in regions:
api = ScalewayAPI(auth_token, region)
for server in api.servers():
hostname = server['hostname']
if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'):
ip = server['public_ip']['address']
else:
ip = server['private_ip']
for server_tag in server['tags']:
if server_tag not in inventory:
inventory[server_tag] = copy.deepcopy(EMPTY_GROUP)
inventory[server_tag]['children'].append(hostname)
if region not in inventory:
inventory[region] = copy.deepcopy(EMPTY_GROUP)
inventory[region]['children'].append(hostname)
inventory['scaleway']['children'].append(hostname)
inventory[hostname] = []
inventory[hostname].append(ip)
return inventory
except Exception:
# Return empty hosts output
traceback.print_exc()
return {'scaleway': {'hosts': []}, '_meta': {'hostvars': {}}}
def get_inventory(config):
    ''' Reads the inventory from cache or the Scaleway API '''
    if cache_available(config):
        # the cache already holds a JSON string; dumping it again would double-encode
        return get_cache('scaleway_ansible_inventory.json', config)
    inv = generate_inv_from_api(config)
    save_cache(inv, config)
    return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
# Read config
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
if cache_available(config):
inventory = get_cache('scaleway_ansible_inventory.json', config)
else:
inventory = get_inventory(config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()

@ -1,112 +0,0 @@
#!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Serf
# (https://serfdom.io/).
#
# Requires the `serfclient` Python module from
# https://pypi.org/project/serfclient/
#
# Environment variables
# ---------------------
# - `SERF_RPC_ADDR`
# - `SERF_RPC_AUTH`
#
# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
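#
# Example invocations (a sketch; 127.0.0.1:7373 is serf's default RPC
# address, and the node name is a placeholder):
#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --list
#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --host <node-name>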
import argparse
import collections
import os
import sys
# https://pypi.org/project/serfclient/
from serfclient import SerfClient, EnvironmentConfig
import json
_key = 'serf'
def _serf_client():
env = EnvironmentConfig()
return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
def get_serf_members_data():
return _serf_client().members().body['Members']
def get_nodes(data):
return [node['Name'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for key, value in node['Tags'].items():
groups[value].append(node['Name'])
return groups
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['Name']] = node['Tags']
return meta
def print_list():
data = get_serf_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
def print_host(host):
data = get_serf_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from serf cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from serf cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])

@ -1,204 +0,0 @@
#!/usr/bin/env python
"""
SoftLayer external inventory script.
The SoftLayer Python API client is required. Use `pip install softlayer` to install it.
You have a few different options for configuring your username and api_key. You can pass
environment variables (SL_USERNAME and SL_API_KEY). You can also write an INI file to
~/.softlayer or /etc/softlayer.conf. For more information see the SL API at:
- https://softlayer-python.readthedocs.io/en/latest/config_file.html
The SoftLayer Python client has a built in command for saving this configuration file
via the command `sl config setup`.
"""
# Copyright (C) 2014 AJ Bourg <aj@ajbourg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# I found the structure of the ec2.py script very helpful as an example
# as I put this together. Thanks to whoever wrote that script!
#
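# A minimal ~/.softlayer configuration, per the SoftLayer client docs linked
# above (values are placeholders):
#
#   [softlayer]
#   username = myusername
#   api_key = myapikey
#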
import SoftLayer
import re
import argparse
import itertools
import json
class SoftLayerInventory(object):
common_items = [
'id',
'globalIdentifier',
'hostname',
'domain',
'fullyQualifiedDomainName',
'primaryBackendIpAddress',
'primaryIpAddress',
'datacenter',
'tagReferences',
'userData.value',
]
vs_items = [
'lastKnownPowerState.name',
'powerState',
'maxCpu',
'maxMemory',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
hw_items = [
'hardwareStatusId',
'processorPhysicalCoreAmount',
'memoryCapacity',
]
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
'''Main path'''
self.inventory = self._empty_inventory()
self.parse_options()
if self.args.list:
self.get_all_servers()
print(self.json_format_dict(self.inventory, True))
elif self.args.host:
self.get_all_servers()
print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups'''
return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
def push(self, my_dict, key, element):
'''Push an element onto an array that may not have been defined in the dict'''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def parse_options(self):
'''Parse all the arguments from the CLI'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
parser.add_argument('--list', action='store_true', default=False,
help='List instances (default: False)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
'''Converts a dict to a JSON object and dumps it as a formatted string'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def process_instance(self, instance, instance_type="virtual"):
'''Populate the inventory dictionary with any instance information'''
# only want active instances
if 'status' in instance and instance['status']['name'] != 'Active':
return
# and powered on instances
if 'powerState' in instance and instance['powerState']['name'] != 'Running':
return
# 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid
if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5:
return
# if there's no IP address, we can't reach it
if 'primaryIpAddress' not in instance:
return
instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else ''
dest = instance['primaryIpAddress']
instance['tags'] = list()
for tag in instance['tagReferences']:
instance['tags'].append(tag['tag']['name'])
del instance['tagReferences']
self.inventory["_meta"]["hostvars"][dest] = instance
# Inventory: group by memory
if 'maxMemory' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest)
elif 'memoryCapacity' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest)
# Inventory: group by cpu count
if 'maxCpu' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest)
elif 'processorPhysicalCoreAmount' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest)
# Inventory: group by datacenter
self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest)
# Inventory: group by hostname
self.push(self.inventory, self.to_safe(instance['hostname']), dest)
# Inventory: group by FQDN
self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest)
# Inventory: group by domain
self.push(self.inventory, self.to_safe(instance['domain']), dest)
# Inventory: group by type (hardware/virtual)
self.push(self.inventory, instance_type, dest)
for tag in instance['tags']:
self.push(self.inventory, tag, dest)
def get_virtual_servers(self):
'''Get all the CCI instances'''
vs = SoftLayer.VSManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
instances = vs.list_instances(mask=mask)
for instance in instances:
self.process_instance(instance)
def get_physical_servers(self):
'''Get all the hardware instances'''
hw = SoftLayer.HardwareManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
instances = hw.list_hardware(mask=mask)
for instance in instances:
self.process_instance(instance, 'hardware')
def get_all_servers(self):
self.client = SoftLayer.Client()
self.get_virtual_servers()
self.get_physical_servers()
SoftLayerInventory()

@ -1,16 +0,0 @@
# Put this ini-file in the same directory as spacewalk.py
# Command line options have precedence over options defined in here.
[spacewalk]
# To limit the script on one organization in spacewalk, uncomment org_number
# and fill in the organization ID:
# org_number=2
# To prefix the group names with the organization ID, set prefix_org_name=true.
# This is convenient when org_number is not set and you have the same group names
# in multiple organizations within spacewalk.
# The prefix is "org_number-".
prefix_org_name=false
# Default cache_age for files created with spacewalk-report is 300sec.
cache_age=300

@ -1,237 +0,0 @@
#!/usr/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacewalk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.9.2 and spacewalk 2.3
"""
#
# Author:: Jon Miller <jonEbird@gmail.com>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# Extended for support of multiple organizations and
# adding the "_meta" dictionary to --list output by
# Bernhard Lichtinger <bernhard.lichtinger@lrz.de> 2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import sys
import os
import time
from optparse import OptionParser
import subprocess
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
base_dir = os.path.dirname(os.path.realpath(__file__))
default_ini_file = os.path.join(base_dir, "spacewalk.ini")
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
# Sanity check
if not os.path.exists(SW_REPORT):
print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
os.chmod(CACHE_DIR, 0o2775)
# Helper functions
# ------------------------------
def spacewalk_report(name):
"""Yield a dictionary form of each CSV output produced by the specified
spacewalk-report
"""
cache_filename = os.path.join(CACHE_DIR, name)
if not os.path.exists(cache_filename) or \
(time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
# Update the cache
fh = open(cache_filename, 'w')
p = subprocess.Popen([SW_REPORT, name], stdout=fh)
p.wait()
fh.close()
with open(cache_filename, 'r') as f:
lines = f.readlines()
keys = lines[0].strip().split(',')
# add 'spacewalk_' prefix to the keys
keys = ['spacewalk_' + key for key in keys]
for line in lines[1:]:
values = line.strip().split(',')
if len(keys) == len(values):
yield dict(zip(keys, values))
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
default=False, action="store_true",
help="Produce a friendlier version of either server list or host detail")
parser.add_option('-o', '--org', default=None, dest="org_number",
help="Limit to spacewalk organization number")
parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
help="Prefix the group name with the organization number")
(options, args) = parser.parse_args()
# read spacewalk.ini if present
# ------------------------------
if os.path.exists(INI_FILE):
config = ConfigParser.SafeConfigParser()
config.read(INI_FILE)
if config.has_option('spacewalk', 'cache_age'):
CACHE_AGE = config.getint('spacewalk', 'cache_age')
if not options.org_number and config.has_option('spacewalk', 'org_number'):
options.org_number = config.get('spacewalk', 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name')
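# A minimal spacewalk.ini matching the options read above might look like
# this (values are illustrative):
# [spacewalk]
# cache_age = 300
# org_number = 1
# prefix_org_name = true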
# Generate dictionary for mapping group_id to org_id
# ------------------------------
org_groups = {}
try:
for group in spacewalk_report('system-groups'):
org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
except (OSError) as e:
print('Problem executing the command "%s system-groups": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
# List out the known servers from Spacewalk
# ------------------------------
if options.list:
# to build the "_meta"-Group with hostvars first create dictionary for later use
host_vars = {}
try:
for item in spacewalk_report('inventory'):
host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items())
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
groups = {}
meta = {"hostvars": {}}
try:
for system in spacewalk_report('system-groups-systems'):
# first get org_id of system
org_id = org_groups[system['spacewalk_group_id']]
# optionally prefix the group name with the org_id:
if options.prefix_org_name:
prefix = org_id + "-"
group_name = prefix + system['spacewalk_group_name']
else:
group_name = system['spacewalk_group_name']
# limit to one organization if requested; otherwise list all groups and systems:
if not options.org_number or org_id == options.org_number:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and system['spacewalk_server_name'] not in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
except (OSError) as e:
print('Problem executing the command "%s system-groups-systems": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
for group, systems in iteritems(groups):
print('[%s]\n%s\n' % (group, '\n'.join(systems)))
else:
final = dict([(k, list(s)) for k, s in iteritems(groups)])
final["_meta"] = meta
print(json.dumps(final))
sys.exit(0)
# Return detailed information about the given spacewalk server
# ------------------------------
elif options.host:
host_details = {}
try:
for system in spacewalk_report('inventory'):
if system['spacewalk_hostname'] == options.host:
host_details = system
break
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
print('Host: %s' % options.host)
for k, v in iteritems(host_details):
print(' %s: %s' % (k, '\n '.join(v.split(';'))))
else:
print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items())))
sys.exit(0)
else:
parser.print_help()
sys.exit(1)

@ -1,131 +0,0 @@
#!/usr/bin/env python
# (c) 2014, Tomas Karasek <tomas.karasek@digile.fi>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use aliases from ~/.ssh/config.
#
# There were some issues with various Paramiko versions. I took a deeper look
# and tested heavily. Now, Ansible parses this correctly with Paramiko versions
# 1.7.2 through 1.15.2.
#
# It prints an inventory based on the parsed ~/.ssh/config. You can refer to
# hosts by their alias rather than by IP or hostname. It takes advantage of
# the ansible_ssh_{host,port,user,private_key_file} variables.
#
# If you have in your .ssh/config:
# Host git
# HostName git.domain.org
# User tkarasek
# IdentityFile /home/tomk/keys/thekey
#
# You can do
# $ ansible git -m ping
#
# Example invocation:
# ssh_config.py --list
# ssh_config.py --host <alias>
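# With the example .ssh/config above, `ssh_config.py --list` would emit JSON
# shaped roughly like this (abbreviated and illustrative, not captured output):
# {"ssh_config": ["git"],
#  "_meta": {"hostvars": {"git": {
#      "ansible_ssh_host": "git.domain.org",
#      "ansible_ssh_user": "tkarasek",
#      "ansible_ssh_private_key_file": "/home/tomk/keys/thekey"}}}}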
import argparse
import os.path
import sys
import json
import paramiko
from ansible.module_utils.common._collections_compat import MutableSequence
SSH_CONF = '~/.ssh/config'
_key = 'ssh_config'
_ssh_to_ansible = [('user', 'ansible_ssh_user'),
('hostname', 'ansible_ssh_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_ssh_port')]
def get_config():
if not os.path.isfile(os.path.expanduser(SSH_CONF)):
return {}
with open(os.path.expanduser(SSH_CONF)) as f:
cfg = paramiko.SSHConfig()
cfg.parse(f)
ret_dict = {}
for d in cfg._config:
if isinstance(d['host'], MutableSequence):
alias = d['host'][0]
else:
alias = d['host']
if ('?' in alias) or ('*' in alias):
continue
_copy = dict(d)
del _copy['host']
if 'config' in _copy:
ret_dict[alias] = _copy['config']
else:
ret_dict[alias] = _copy
return ret_dict
def print_list():
cfg = get_config()
meta = {'hostvars': {}}
for alias, attributes in cfg.items():
tmp_dict = {}
for ssh_opt, ans_opt in _ssh_to_ansible:
if ssh_opt in attributes:
# If the attribute is a list, just take the first element.
# Private key is returned in a list for some reason.
attr = attributes[ssh_opt]
if isinstance(attr, MutableSequence):
attr = attr[0]
tmp_dict[ans_opt] = attr
if tmp_dict:
meta['hostvars'][alias] = tmp_dict
print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta}))
def print_host(host):
cfg = get_config()
print(json.dumps(cfg[host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script parsing .ssh/config')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from .ssh/config inventory'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])

@ -1,188 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2016, Hugh Ma <hugh.ma@flextronics.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Stacki inventory script
# Configure stacki.yml with the proper auth information and place it in one of the following locations:
# - ../inventory/stacki.yml
# - /etc/stacki/stacki.yml
# - /etc/ansible/stacki.yml
# The stacki.yml file can contain entries for authentication information
# regarding the Stacki front-end node.
#
# use_hostnames uses the hostname rather than the interface IP for the connection
#
#
"""
Example Usage:
List Stacki Nodes
$ ./stacki.py --list
Example Configuration:
---
stacki:
auth:
stacki_user: admin
stacki_password: abc12345678910
stacki_endpoint: http://192.168.200.50/stack
use_hostnames: false
"""
import argparse
import os
import sys
import yaml
from distutils.version import StrictVersion
import json
try:
import requests
except Exception:
sys.exit('requests package is required for this inventory script')
CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
def stack_auth(params):
endpoint = params['stacki_endpoint']
auth_creds = {'USERNAME': params['stacki_user'],
'PASSWORD': params['stacki_password']}
client = requests.session()
client.get(endpoint)
init_csrf = client.cookies['csrftoken']
header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf,
'Content-type': 'application/x-www-form-urlencoded'}
login_endpoint = endpoint + "/login"
login_req = client.post(login_endpoint, data=auth_creds, headers=header)
csrftoken = login_req.cookies['csrftoken']
sessionid = login_req.cookies['sessionid']
auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid)
return client, auth_creds
def stack_build_header(auth_creds):
header = {'csrftoken': auth_creds['CSRFTOKEN'],
'X-CSRFToken': auth_creds['CSRFTOKEN'],
'sessionid': auth_creds['SESSIONID'],
'Content-type': 'application/json'}
return header
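# Sketch of how the two helpers above combine (the credentials and endpoint
# are illustrative assumptions):
# client, creds = stack_auth({'stacki_user': 'admin',
#                             'stacki_password': 'secret',
#                             'stacki_endpoint': 'http://192.168.200.50/stack'})
# header = stack_build_header(creds)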
def stack_host_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
headers=header)
return json.loads(stack_r.json())
def stack_net_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
headers=header)
return json.loads(stack_r.json())
def format_meta(hostdata, intfdata, config):
use_hostnames = config['use_hostnames']
meta = dict(all=dict(hosts=list()),
frontends=dict(hosts=list()),
backends=dict(hosts=list()),
_meta=dict(hostvars=dict()))
# Iterate through list of dicts of hosts and remove
# environment key as it causes conflicts
for host in hostdata:
del host['environment']
meta['_meta']['hostvars'][host['host']] = host
meta['_meta']['hostvars'][host['host']]['interfaces'] = list()
# @bbyhuy to improve readability in next iteration
for intf in intfdata:
if intf['host'] in meta['_meta']['hostvars']:
meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf)
if intf['default'] is True:
meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip']
if not use_hostnames:
meta['all']['hosts'].append(intf['ip'])
if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
meta['backends']['hosts'].append(intf['ip'])
else:
meta['frontends']['hosts'].append(intf['ip'])
else:
meta['all']['hosts'].append(intf['host'])
if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
meta['backends']['hosts'].append(intf['host'])
else:
meta['frontends']['hosts'].append(intf['host'])
return meta
def parse_args():
parser = argparse.ArgumentParser(description='Stacki Inventory Module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active hosts')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
sys.exit('requests>=2.4.3 is required for this inventory script')
try:
config_files = list(CONFIG_FILES)  # copy so the module-level list is not mutated
config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml')
config = None
for cfg_file in config_files:
if os.path.isfile(cfg_file):
with open(cfg_file, 'r') as stream:
config = yaml.safe_load(stream)
break
if not config:
sys.stderr.write("No config file found at {0}\n".format(config_files))
sys.exit(1)
client, auth_creds = stack_auth(config['stacki']['auth'])
header = stack_build_header(auth_creds)
host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client)
intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client)
final_meta = format_meta(host_list, intf_list, config)
print(json.dumps(final_meta, indent=4))
except Exception as e:
sys.stderr.write('%s\n' % e)  # str(e); exception .message is gone in Python 3
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()

@ -1,7 +0,0 @@
---
stacki:
auth:
stacki_user: admin
stacki_password: GhYgWut1hfGbbnstmbW3m-bJbeME-3EvC20rF1LHrDM
stacki_endpoint: http://192.168.200.50/stack
use_hostnames: false

@ -1,131 +0,0 @@
#!/usr/bin/env python
"""
Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
returns it under the host group 'vagrant'
Example Vagrant configuration using this script:
config.vm.provision :ansible do |ansible|
ansible.playbook = "./provision/your_playbook.yml"
ansible.inventory_path = "./provision/inventory/vagrant.py"
ansible.verbose = true
end
"""
# Copyright (C) 2013 Mark Mandel <mark@compoundtheory.com>
# 2015 Igor Khomyakov <homyakov@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the spacewalk.py inventory script for giving me the basic structure
# of this.
#
import sys
import os.path
import subprocess
import re
from paramiko import SSHConfig
from optparse import OptionParser
from collections import defaultdict
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import StringIO
_group = 'vagrant' # a default group
_ssh_to_ansible = [('user', 'ansible_user'),
('hostname', 'ansible_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_port')]
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
# get the ssh config for every running box, as a dict keyed by box name.
def get_ssh_config():
return dict((k, get_a_ssh_config(k)) for k in list_running_boxes())
# list all the running boxes
def list_running_boxes():
output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n')
boxes = []
for line in output:
matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
if matcher:
boxes.append(matcher.group(1))
return boxes
# get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict')
config = SSHConfig()
config.parse(StringIO(output))
host_config = config.lookup(box_name)
# man 5 ssh_config:
# > It is possible to have multiple identity files ...
# > all these identities will be tried in sequence.
for identity_file in host_config['identityfile']:
if os.path.isfile(identity_file):
host_config['identityfile'] = identity_file
return dict((v, host_config[k]) for k, v in _ssh_to_ansible)
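# Illustrative result for a box whose `vagrant ssh-config` reports
# HostName 127.0.0.1, User vagrant, Port 2222 (values assumed, not real output):
# {'ansible_user': 'vagrant', 'ansible_host': '127.0.0.1',
#  'ansible_port': '2222', 'ansible_ssh_private_key_file': '/path/to/private_key'}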
# List out servers that vagrant has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
meta = defaultdict(dict)
for host in ssh_config:
meta['hostvars'][host] = ssh_config[host]
print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
print(json.dumps(get_a_ssh_config(options.host)))
sys.exit(0)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(0)

@ -1,117 +0,0 @@
#!/usr/bin/env python
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
from subprocess import Popen, PIPE
import json
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
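# Illustrative: SetEncoder lets json.dumps serialize the sets built below, e.g.
# json.dumps({'all': set(['vm1'])}, cls=SetEncoder) -> '{"all": ["vm1"]}'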
VBOX = "VBoxManage"
def get_hosts(host=None):
returned = {}
try:
if host:
p = Popen([VBOX, 'showvminfo', host], stdout=PIPE, universal_newlines=True)
else:
returned = {'all': set(), '_metadata': {}}
p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE, universal_newlines=True)
except Exception:
sys.exit(1)
hostvars = {}
prevkey = pref_k = ''
for line in p.stdout.readlines():
try:
k, v = line.split(':', 1)
except Exception:
continue
if k == '':
continue
v = v.strip()
if k.startswith('Name'):
if v not in hostvars:
curname = v
hostvars[curname] = {}
try: # try to get network info
x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE, universal_newlines=True)
ipinfo = x.stdout.read()
if 'Value' in ipinfo:
a, ip = ipinfo.split(':', 1)
hostvars[curname]['ansible_ssh_host'] = ip.strip()
except Exception:
pass
continue
if not host:
if k == 'Groups':
for group in v.split('/'):
if group:
if group not in returned:
returned[group] = set()
returned[group].add(curname)
returned['all'].add(curname)
continue
pref_k = 'vbox_' + k.strip().replace(' ', '_')
if k.startswith(' '):
if prevkey not in hostvars[curname]:
hostvars[curname][prevkey] = {}
hostvars[curname][prevkey][pref_k] = v
else:
if v != '':
hostvars[curname][pref_k] = v
prevkey = pref_k
if not host:
returned['_metadata']['hostvars'] = hostvars
else:
returned = hostvars[host]
return returned
if __name__ == '__main__':
inventory = {}
hostname = None
if len(sys.argv) > 1:
if sys.argv[1] == "--host":
hostname = sys.argv[2]
if hostname:
inventory = get_hosts(hostname)
else:
inventory = get_hosts()
sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder))

@ -1,20 +0,0 @@
# Ansible Zabbix external inventory script settings
#
[zabbix]
# Server location
server = http://zabbix.example.com/zabbix
# Login
username = admin
password = zabbix
# Verify the server's SSL certificate
validate_certs = True
# Read zabbix inventory per host
read_host_inventory = True
# Set ansible_ssh_host based on first interface settings
use_host_interface = True

@ -1,196 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Greg Buehler
# (c) 2018, Filippo Ferrazini
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Zabbix Server external inventory script.
========================================
Returns hosts and hostgroups from Zabbix Server.
If you want to run with --limit against a host group with a space in the
name, use an asterisk. For example: --limit="Linux*servers".
Configuration is read from `zabbix.ini`.
Tested with Zabbix Server 2.0.6, 3.2.3 and 3.4.
"""
from __future__ import print_function
import os
import sys
import argparse
from ansible.module_utils.six.moves import configparser
try:
from zabbix_api import ZabbixAPI
except Exception:
print("Error: Zabbix API library must be installed: pip install zabbix-api.",
file=sys.stderr)
sys.exit(1)
import json
class ZabbixInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
conf_path = './zabbix.ini'
if not os.path.exists(conf_path):
conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini'
if os.path.exists(conf_path):
config.read(conf_path)
# server
if config.has_option('zabbix', 'server'):
self.zabbix_server = config.get('zabbix', 'server')
# login
if config.has_option('zabbix', 'username'):
self.zabbix_username = config.get('zabbix', 'username')
if config.has_option('zabbix', 'password'):
self.zabbix_password = config.get('zabbix', 'password')
# ssl certs
if config.has_option('zabbix', 'validate_certs'):
if config.get('zabbix', 'validate_certs') in ['false', 'False', False]:
self.validate_certs = False
# host inventory
if config.has_option('zabbix', 'read_host_inventory'):
if config.get('zabbix', 'read_host_inventory') in ['true', 'True', True]:
self.read_host_inventory = True
# host interface
if config.has_option('zabbix', 'use_host_interface'):
if config.get('zabbix', 'use_host_interface') in ['false', 'False', False]:
self.use_host_interface = False
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def hoststub(self):
return {
'hosts': []
}
def get_host(self, api, name):
api_query = {'output': 'extend', 'selectGroups': 'extend', "filter": {"host": [name]}}
if self.use_host_interface:
api_query['selectInterfaces'] = ['useip', 'ip', 'dns']
if self.read_host_inventory:
api_query['selectInventory'] = "extend"
data = {'ansible_ssh_host': name}
if self.use_host_interface or self.read_host_inventory:
try:
hosts_data = api.host.get(api_query)[0]
if 'interfaces' in hosts_data:
# use first interface only
if hosts_data['interfaces'][0]['useip'] == 0:
data['ansible_ssh_host'] = hosts_data['interfaces'][0]['dns']
else:
data['ansible_ssh_host'] = hosts_data['interfaces'][0]['ip']
if ('inventory' in hosts_data) and (hosts_data['inventory']):
data.update(hosts_data['inventory'])
except IndexError:
# Host not found in zabbix
pass
return data
def get_list(self, api):
api_query = {'output': 'extend', 'selectGroups': 'extend'}
if self.use_host_interface:
api_query['selectInterfaces'] = ['useip', 'ip', 'dns']
if self.read_host_inventory:
api_query['selectInventory'] = "extend"
hosts_data = api.host.get(api_query)
data = {'_meta': {'hostvars': {}}}
data[self.defaultgroup] = self.hoststub()
for host in hosts_data:
hostname = host['name']
hostvars = dict()
data[self.defaultgroup]['hosts'].append(hostname)
for group in host['groups']:
groupname = group['name']
if groupname not in data:
data[groupname] = self.hoststub()
data[groupname]['hosts'].append(hostname)
if 'interfaces' in host:
# use first interface only
if host['interfaces'][0]['useip'] == 0:
hostvars['ansible_ssh_host'] = host['interfaces'][0]['dns']
else:
hostvars['ansible_ssh_host'] = host['interfaces'][0]['ip']
if ('inventory' in host) and (host['inventory']):
hostvars.update(host['inventory'])
data['_meta']['hostvars'][hostname] = hostvars
return data
def __init__(self):
self.defaultgroup = 'group_all'
self.zabbix_server = None
self.zabbix_username = None
self.zabbix_password = None
self.validate_certs = True
self.read_host_inventory = False
self.use_host_interface = True
self.meta = {}
self.read_settings()
self.read_cli()
if self.zabbix_server and self.zabbix_username:
try:
api = ZabbixAPI(server=self.zabbix_server, validate_certs=self.validate_certs)
api.login(user=self.zabbix_username, password=self.zabbix_password)
# zabbix_api tries to exit if it cannot parse what the zabbix server returned
# so we have to use SystemExit here
except (Exception, SystemExit) as e:
print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr)
sys.exit(1)
if self.options.host:
data = self.get_host(api, self.options.host)
print(json.dumps(data, indent=2))
elif self.options.list:
data = self.get_list(api)
print(json.dumps(data, indent=2))
else:
print("usage: --list ..OR.. --host <hostname>", file=sys.stderr)
sys.exit(1)
else:
print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr)
sys.exit(1)
ZabbixInventory()

@ -1,43 +0,0 @@
#!/usr/bin/env python
# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = []
for line in pipe.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
fields = line.split(':')
if fields[1] != 'global':
result['all']['hosts'].append(fields[1])
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'zone'
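# At this point, for the sample zoneadm line above, `result` would be
# (illustrative): {'all': {'hosts': ['work'], 'vars': {'ansible_connection': 'zone'}}}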
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({'ansible_connection': 'zone'}))
else:
sys.stderr.write("Need an argument, either --list or --host <host>\n")

@ -1,10 +0,0 @@
[azure_keyvault] # Used with Azure KeyVault
vault_name=django-keyvault
secret_name=vaultpw
secret_version=9k1e6c7367b33eac8ee241b3698009f3
[azure] # Used by Dynamic Inventory
group_by_resource_group=yes
group_by_location=yes
group_by_security_group=yes
group_by_tag=yes

@ -1,592 +0,0 @@
#!/usr/bin/env python
#
# This script borrows a great deal of code from the azure_rm.py dynamic inventory script
# that is packaged with Ansible. This can be found in the Ansible GitHub project at:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/azure_rm.py
#
# The Azure Dynamic Inventory script was written by:
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
# Altered/Added for Vault functionality:
# Austin Hobbs, GitHub: @OxHobbs
'''
Ansible Vault Password with Azure Key Vault Secret Script
=========================================================
This script is designed to be used with Ansible Vault. It can be passed as
the password file to the ansible-vault command. It uses the Azure Python
SDK; for instructions on installing the Azure Python SDK see
http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials for all
authentication parameters.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
- cloud_environment
- adfs_authority_url
- vault-name
- secret-name
- secret-version
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
- AZURE_CLOUD_ENVIRONMENT
- AZURE_ADFS_AUTHORITY_URL
- AZURE_VAULT_NAME
- AZURE_VAULT_SECRET_NAME
- AZURE_VAULT_SECRET_VERSION
Vault
-----
The order of precedence of Azure Key Vault Secret information is the same.
Command line arguments, environment variables, and finally the azure_vault.ini
file with the [azure_keyvault] section.
azure_vault.ini (or azure_rm.ini if merged with Azure Dynamic Inventory Script)
------------------------------------------------------------------------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_vault.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_vault') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_VAULT_INI_PATH environment variable:
export AZURE_VAULT_INI_PATH=/path/to/custom.ini
or
export AZURE_VAULT_INI_PATH=[same path as azure_rm.ini if merged]
__NOTE__: If using the azure_rm.py dynamic inventory script, it is possible to use the same .ini
file for both the azure_rm dynamic inventory and the azure_vault password file. Simply add a section
named [azure_keyvault] to the ini file with the following properties: vault_name, secret_name and
secret_version.
Examples:
---------
Validate the vault_pw script with Python
$ python azure_vault.py -n mydjangovault -s vaultpw -v 6b6w7f7252b44eac8ee726b3698009f3
$ python azure_vault.py --vault-name 'mydjangovault' --secret-name 'vaultpw' \
--secret-version 6b6w7f7252b44eac8ee726b3698009f3
Use with a playbook
$ ansible-playbook -i ./azure_rm.py my_playbook.yml --limit galaxy-qa --vault-password-file ./azure_vault.py
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
- Austin Hobbs (@OxHobbs)
Company: Ansible by Red Hat, Microsoft
Version: 0.1.0
'''
import argparse
import os
import re
import sys
import inspect
from azure.keyvault import KeyVaultClient
from ansible.module_utils.six.moves import configparser as cp
from os.path import expanduser
import ansible.module_utils.six.moves.urllib.parse as urlparse
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
CLIError = None
try:
from msrestazure.azure_active_directory import AADTokenCredentials
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_active_directory import MSIAuthentication
from msrestazure import azure_cloud
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.subscriptions import SubscriptionClient
from azure.mgmt.compute import ComputeManagementClient
from adal.authentication_context import AuthenticationContext
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
try:
from azure.cli.core.util import CLIError
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
from azure.common.cloud import get_cli_active_cloud
except ImportError:
HAS_AZURE_CLI_CORE = False
CLIError = Exception
try:
from ansible.release import __version__ as ansible_version
except ImportError:
ansible_version = 'unknown'
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD',
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
)
AZURE_VAULT_SETTINGS = dict(
vault_name='AZURE_VAULT_NAME',
secret_name='AZURE_VAULT_SECRET_NAME',
secret_version='AZURE_VAULT_SECRET_VERSION',
)
AZURE_MIN_VERSION = "2.0.0"
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
class AzureRM(object):
def __init__(self, args):
self._args = args
self._cloud_environment = None
self._compute_client = None
self._resource_client = None
self._network_client = None
self._adfs_authority_url = None
self._vault_client = None
self._resource = None
self.debug = False
if args.debug:
self.debug = True
self.credentials = self._get_credentials(args)
if not self.credentials:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"or define a profile in ~/.azure/credentials.")
# if cloud_environment specified, look up/build Cloud object
raw_cloud_env = self.credentials.get('cloud_environment')
if not raw_cloud_env:
self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
else:
# try to look up "well-known" values via the name attribute on azure_cloud members
all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
if len(matched_clouds) == 1:
self._cloud_environment = matched_clouds[0]
elif len(matched_clouds) > 1:
self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(
raw_cloud_env))
else:
if not urlparse.urlparse(raw_cloud_env).scheme:
self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format(
[x.name for x in all_clouds]))
try:
self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
except Exception as e:
self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message))
if self.credentials.get('subscription_id', None) is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
# get authentication authority
# for adfs, user could pass in authority or not.
# for others, use default authority from cloud environment
if self.credentials.get('adfs_authority_url'):
self._adfs_authority_url = self.credentials.get('adfs_authority_url')
else:
self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
# get resource from cloud environment
self._resource = self._cloud_environment.endpoints.active_directory_resource_id
if self.credentials.get('credentials'):
self.azure_credentials = self.credentials.get('credentials')
elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'],
cloud_environment=self._cloud_environment)
elif self.credentials.get('ad_user') is not None and \
self.credentials.get('password') is not None and \
self.credentials.get('client_id') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = self.acquire_token_with_username_password(
self._adfs_authority_url,
self._resource,
self.credentials['ad_user'],
self.credentials['password'],
self.credentials['client_id'],
self.credentials['tenant'])
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
tenant = self.credentials.get('tenant')
if not tenant:
tenant = 'common'
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
self.credentials['password'],
tenant=tenant,
cloud_environment=self._cloud_environment)
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password, or "
"ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, "
"or be logged in using AzureCLI.")
def log(self, msg):
if self.debug:
print(msg + u'\n')
def fail(self, msg):
raise Exception(msg)
def _get_profile(self, profile="default"):
path = expanduser("~")
path += "/.azure/credentials"
try:
config = cp.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except Exception:
pass
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
return env_credentials
return None
def _get_azure_cli_credentials(self):
credentials, subscription_id = get_azure_cli_credentials()
cloud_environment = get_cli_active_cloud()
cli_credentials = {
'credentials': credentials,
'subscription_id': subscription_id,
'cloud_environment': cloud_environment
}
return cli_credentials
def _get_msi_credentials(self, subscription_id_param=None):
credentials = MSIAuthentication()
try:
# try to get the subscription in MSI to test whether MSI is enabled
subscription_client = SubscriptionClient(credentials)
subscription = next(subscription_client.subscriptions.list())
subscription_id = str(subscription.subscription_id)
return {
'credentials': credentials,
'subscription_id': subscription_id_param or subscription_id
}
except Exception as exc:
return None
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = getattr(params, attribute)
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['client_id'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
if arg_credentials['ad_user'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
# try default profile from ~./azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
if msi_credentials:
self.log('Retrieved credentials from MSI.')
return msi_credentials
try:
if HAS_AZURE_CLI_CORE:
self.log('Retrieving credentials from AzureCLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as ce:
self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
return None
def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
authority_uri = authority
if tenant is not None:
authority_uri = authority + '/' + tenant
context = AuthenticationContext(authority_uri)
token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
return AADTokenCredentials(token_response)
def _register(self, key):
try:
# We have to perform the one-time registration here. Otherwise, we receive an error the first
# time we attempt to use the requested client.
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception as exc:
self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
self.log("You might need to register {0} using an admin account".format(key))
self.log(("To register a provider using the Python CLI: "
"https://docs.microsoft.com/azure/azure-resource-manager/"
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
def get_mgmt_svc_client(self, client_type, base_url, api_version):
client = client_type(self.azure_credentials,
self.subscription_id,
base_url=base_url,
api_version=api_version)
client.config.add_user_agent(ANSIBLE_USER_AGENT)
return client
def get_vault_client(self):
return KeyVaultClient(self.azure_credentials)
def get_vault_suffix(self):
return self._cloud_environment.suffixes.keyvault_dns
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-06-01')
self._register('Microsoft.Network')
return self._network_client
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-05-10')
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-03-30')
self._register('Microsoft.Compute')
return self._compute_client
@property
def vault_client(self):
self.log('Getting the Key Vault client')
if not self._vault_client:
self._vault_client = self.get_vault_client()
return self._vault_client
class AzureKeyVaultSecret:
def __init__(self):
self._args = self._parse_cli_args()
try:
rm = AzureRM(self._args)
except Exception as e:
sys.exit("{0}".format(str(e)))
self._get_vault_settings()
if self._args.vault_name:
self.vault_name = self._args.vault_name
if self._args.secret_name:
self.secret_name = self._args.secret_name
if self._args.secret_version:
self.secret_version = self._args.secret_version
self._vault_suffix = rm.get_vault_suffix()
self._vault_client = rm.vault_client
print(self.get_password_from_vault())
def _parse_cli_args(self):
parser = argparse.ArgumentParser(
description='Obtain the vault password used to secure your Ansible secrets'
)
parser.add_argument('-n', '--vault-name', action='store', help='Name of Azure Key Vault')
parser.add_argument('-s', '--secret-name', action='store',
help='Name of the secret stored in Azure Key Vault')
parser.add_argument('-v', '--secret-version', action='store',
help='Version of the secret to be retrieved')
parser.add_argument('--debug', action='store_true', default=False,
help='Send the debug messages to STDOUT')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
help='Azure Subscription Id')
parser.add_argument('--client_id', action='store',
help='Azure Client Id ')
parser.add_argument('--secret', action='store',
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad_user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--adfs_authority_url', action='store',
help='Azure ADFS authority url')
parser.add_argument('--cloud_environment', action='store',
help='Azure Cloud Environment name or metadata discovery URL')
return parser.parse_args()
def get_password_from_vault(self):
vault_url = 'https://{0}{1}'.format(self.vault_name, self._vault_suffix)
secret = self._vault_client.get_secret(vault_url, self.secret_name, self.secret_version)
return secret.value
def _get_vault_settings(self):
env_settings = self._get_vault_env_settings()
if None not in set(env_settings.values()):
for key in AZURE_VAULT_SETTINGS:
setattr(self, key, env_settings.get(key, None))
else:
file_settings = self._load_vault_settings()
if not file_settings:
return
for key in AZURE_VAULT_SETTINGS:
if file_settings.get(key):
setattr(self, key, file_settings.get(key))
def _get_vault_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_VAULT_SETTINGS.items():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_vault_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_VAULT_INI_PATH', default_path)))
config = None
settings = None
try:
config = cp.ConfigParser()
config.read(path)
except Exception:
pass
if config is not None:
settings = dict()
for key in AZURE_VAULT_SETTINGS:
try:
settings[key] = config.get('azure_keyvault', key, raw=True)
except Exception:
pass
return settings
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(
AZURE_MIN_VERSION, HAS_AZURE_EXC))
AzureKeyVaultSecret()
if __name__ == '__main__':
main()

@ -1,147 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <matt@sivel.net>
# (c) 2016, Justin Mayer <https://justinmayer.com/>
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# =============================================================================
#
# This script is to be used with ansible-vault's --vault-id arg
# to retrieve the vault password via your OS's native keyring application.
#
# This file *MUST* be saved with executable permissions. Otherwise, Ansible
# will try to parse as a password file and display: "ERROR! Decryption failed"
#
# The `keyring` Python module is required: https://pypi.org/project/keyring/
#
# By default, this script will store the specified password in the keyring of
# the user that invokes the script. To specify a user keyring, add a [vault]
# section to your ansible.cfg file with a 'username' option. Example:
#
# [vault]
# username = 'ansible-vault'
#
# Usage looks like:
#
# ansible-vault --vault-id keyring_id@contrib/vault/vault-keyring-client.py view some_encrypted_file
#
# --vault-id will call this script like:
#
# contrib/vault/vault-keyring-client.py --vault-id keyring_id
#
# That will retrieve the password from the user's keyring for the
# keyring service 'keyring_id'. The equivalent of:
#
# keyring get keyring_id $USER
#
# If no vault-id name is specified on the ansible command line, the
# vault-keyring-client.py script will be called without '--vault-id' and will
# default to the keyring service 'ansible'. This is equivalent to:
#
# keyring get ansible $USER
#
# You can configure the `vault_password_file` option in ansible.cfg:
#
# [defaults]
# ...
# vault_password_file = /path/to/vault-keyring-client.py
# ...
#
# To set your password, `cd` to your project directory and run:
#
# # will use default keyring service / vault-id of 'ansible'
# /path/to/vault-keyring-client.py --set
#
# or to specify the keyring service / vault-id of 'my_ansible_secret':
#
# /path/to/vault-keyring-client.py --vault-id my_ansible_secret --set
#
# If you choose not to configure the path to `vault_password_file` in
# ansible.cfg, your `ansible-playbook` command might look like:
#
# ansible-playbook --vault-id=keyring_id@/path/to/vault-keyring-client.py site.yml
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
import argparse
import sys
import getpass
import keyring
from ansible.config.manager import ConfigManager
KEYNAME_UNKNOWN_RC = 2
def build_arg_parser():
parser = argparse.ArgumentParser(description='Get a vault password from user keyring')
parser.add_argument('--vault-id', action='store', default=None,
dest='vault_id',
help='name of the vault secret to get from keyring')
parser.add_argument('--username', action='store', default=None,
help='the username whose keyring is queried')
parser.add_argument('--set', action='store_true', default=False,
dest='set_password',
help='set the password instead of getting it')
return parser
def main():
config_manager = ConfigManager()
username = config_manager.data.get_setting('vault.username')
if not username:
username = getpass.getuser()
keyname = config_manager.data.get_setting('vault.keyname')
if not keyname:
keyname = 'ansible'
arg_parser = build_arg_parser()
args = arg_parser.parse_args()
username = args.username or username
keyname = args.vault_id or keyname
if args.set_password:
intro = 'Storing password in "{}" user keyring using key name: {}\n'
sys.stdout.write(intro.format(username, keyname))
password = getpass.getpass()
confirm = getpass.getpass('Confirm password: ')
if password == confirm:
keyring.set_password(keyname, username, password)
else:
sys.stderr.write('Passwords do not match\n')
sys.exit(1)
else:
secret = keyring.get_password(keyname, username)
if secret is None:
sys.stderr.write('vault-keyring-client could not find key="%s" for user="%s" via backend="%s"\n' %
(keyname, username, keyring.get_keyring().name))
sys.exit(KEYNAME_UNKNOWN_RC)
sys.stdout.write('%s\n' % secret)
sys.exit(0)
if __name__ == '__main__':
main()

@ -1,101 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <matt@sivel.net>
# (c) 2016, Justin Mayer <https://justinmayer.com/>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# =============================================================================
#
# This script is to be used with vault_password_file or --vault-password-file
# to retrieve the vault password via your OS's native keyring application.
#
# This file *MUST* be saved with executable permissions. Otherwise, Ansible
# will try to parse as a password file and display: "ERROR! Decryption failed"
#
# The `keyring` Python module is required: https://pypi.org/project/keyring/
#
# By default, this script will store the specified password in the keyring of
# the user that invokes the script. To specify a user keyring, add a [vault]
# section to your ansible.cfg file with a 'username' option. Example:
#
# [vault]
# username = 'ansible-vault'
#
# Another optional setting is for the key name, which allows you to use this
# script to handle multiple project vaults with different passwords:
#
# [vault]
# keyname = 'ansible-vault-yourproject'
#
# You can configure the `vault_password_file` option in ansible.cfg:
#
# [defaults]
# ...
# vault_password_file = /path/to/vault-keyring.py
# ...
#
# To set your password, `cd` to your project directory and run:
#
# python /path/to/vault-keyring.py set
#
# If you choose not to configure the path to `vault_password_file` in
# ansible.cfg, your `ansible-playbook` command might look like:
#
# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
import sys
import getpass
import keyring
from ansible.config.manager import ConfigManager, get_ini_config_value
def main():
config = ConfigManager()
username = get_ini_config_value(
config._parsers[config._config_file],
dict(section='vault', key='username')
) or getpass.getuser()
keyname = get_ini_config_value(
config._parsers[config._config_file],
dict(section='vault', key='keyname')
) or 'ansible'
if len(sys.argv) == 2 and sys.argv[1] == 'set':
intro = 'Storing password in "{}" user keyring using key name: {}\n'
sys.stdout.write(intro.format(username, keyname))
password = getpass.getpass()
confirm = getpass.getpass('Confirm password: ')
if password == confirm:
keyring.set_password(keyname, username, password)
else:
sys.stderr.write('Passwords do not match\n')
sys.exit(1)
else:
sys.stdout.write('{0}\n'.format(keyring.get_password(keyname,
username)))
sys.exit(0)
if __name__ == '__main__':
main()

@ -1,158 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.basic import env_fallback
try:
import footmark
import footmark.ecs
import footmark.slb
import footmark.vpc
import footmark.rds
import footmark.ess
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
class AnsibleACSError(Exception):
pass
def acs_common_argument_spec():
return dict(
alicloud_access_key=dict(required=True, aliases=['access_key_id', 'access_key'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])),
alicloud_secret_key=dict(required=True, aliases=['secret_access_key', 'secret_key'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])),
alicloud_security_token=dict(aliases=['security_token'], no_log=True,
fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])),
)
def ecs_argument_spec():
spec = acs_common_argument_spec()
spec.update(
dict(
alicloud_region=dict(required=True, aliases=['region', 'region_id'],
fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])),
)
)
return spec
def get_acs_connection_info(module):
ecs_params = dict(acs_access_key_id=module.params.get('alicloud_access_key'),
acs_secret_access_key=module.params.get('alicloud_secret_key'),
security_token=module.params.get('alicloud_security_token'),
user_agent='Ansible-Provider-Alicloud')
return module.params.get('alicloud_region'), ecs_params
def connect_to_acs(acs_module, region, **params):
conn = acs_module.connect_to_region(region, **params)
if not conn:
if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]:
raise AnsibleACSError(
"Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__))
else:
raise AnsibleACSError(
"Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__))
return conn
def ecs_connect(module):
""" Return an ecs connection"""
region, ecs_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ecs = connect_to_acs(footmark.ecs, region, **ecs_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region was specified, so we fall back to the old connection method
return ecs
def slb_connect(module):
""" Return an slb connection"""
region, slb_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
slb = connect_to_acs(footmark.slb, region, **slb_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region was specified, so we fall back to the old connection method
return slb
def vpc_connect(module):
""" Return an vpc connection"""
region, vpc_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
vpc = connect_to_acs(footmark.vpc, region, **vpc_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region was specified, so we fall back to the old connection method
return vpc
def rds_connect(module):
""" Return an rds connection"""
region, rds_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
rds = connect_to_acs(footmark.rds, region, **rds_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region was specified, so we fall back to the old connection method
return rds
def ess_connect(module):
""" Return an ess connection"""
region, ess_params = get_acs_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ess = connect_to_acs(footmark.ess, region, **ess_params)
except AnsibleACSError as e:
module.fail_json(msg=str(e))
# Otherwise, no region was specified, so we fall back to the old connection method
return ess
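# Illustrative sketch, not part of the original file: how a custom module might
# consume these helpers. The 'status' parameter is an assumption; AnsibleModule
# comes from ansible.module_utils.basic.
def _example_ecs_module():
    from ansible.module_utils.basic import AnsibleModule
    argument_spec = ecs_argument_spec()
    argument_spec.update(dict(status=dict(default='present', choices=['present', 'absent'])))
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_FOOTMARK:
        module.fail_json(msg='footmark is required for this module')
    ecs = ecs_connect(module)  # calls module.fail_json on connection errors
    module.exit_json(changed=False, region=module.params.get('alicloud_region'))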

@ -1,217 +0,0 @@
#
# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic cloud modules
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.cloud import CloudRetry
The 'cloud' module provides the following common classes:
* CloudRetry
- The base class to be used by other cloud providers, in order to
provide a backoff/retry decorator based on status codes.
- Example using the AWSRetry class which inherits from CloudRetry.
@AWSRetry.exponential_backoff(retries=10, delay=3)
get_ec2_security_group_ids_from_names()
@AWSRetry.jittered_backoff()
get_ec2_security_group_ids_from_names()
"""
import random
from functools import wraps
import syslog
import time
def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
""" Customizable exponential backoff strategy.
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Initial (base) delay.
backoff (float): base of the exponent to use for exponential
backoff.
max_delay (int): Optional. If provided, each delay generated is capped
at this amount. Defaults to 60 seconds.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for an exponential backoff strategy.
Usage:
>>> backoff = _exponential_backoff()
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
"""
def backoff_gen():
for retry in range(0, retries):
sleep = delay * backoff ** retry
yield sleep if max_delay is None else min(sleep, max_delay)
return backoff_gen
def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
""" Implements the "Full Jitter" backoff strategy described here
https://www.awsarchitectureblog.com/2015/03/backoff.html
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Approximate number of seconds to sleep for the first
retry.
max_delay (int): The maximum number of seconds to sleep for any retry.
_random (random.Random or None): Makes this generator testable by
allowing developers to explicitly pass in a seeded Random.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for a full jitter backoff strategy.
Usage:
>>> backoff = _full_jitter_backoff(retries=5)
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[3, 6, 5, 23, 38]
>>> list(backoff())
[2, 1, 6, 6, 31]
"""
def backoff_gen():
for retry in range(0, retries):
yield _random.randint(0, min(max_delay, delay * 2 ** retry))
return backoff_gen
class CloudRetry(object):
""" CloudRetry can be used by any cloud provider, in order to implement a
backoff algorithm/retry effect based on Status Code from Exceptions.
"""
# This is the base class of the exception.
# AWS Example botocore.exceptions.ClientError
base_class = None
@staticmethod
def status_code_from_exception(error):
""" Return the status code from the exception object
Args:
error (object): The exception itself.
"""
pass
@staticmethod
def found(response_code, catch_extra_error_codes=None):
""" Return True if the Response Code to retry on was found.
Args:
response_code (str): This is the Response Code that is being matched against.
"""
pass
@classmethod
def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
""" Retry calling the Cloud decorated function using the provided
backoff strategy.
Args:
backoff_strategy (callable): Callable that returns a generator. The
generator should yield sleep times for each retry of the decorated
function.
"""
def deco(f):
@wraps(f)
def retry_func(*args, **kwargs):
for delay in backoff_strategy():
try:
return f(*args, **kwargs)
except Exception as e:
if isinstance(e, cls.base_class):
response_code = cls.status_code_from_exception(e)
if cls.found(response_code, catch_extra_error_codes):
msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
syslog.syslog(syslog.LOG_INFO, msg)
time.sleep(delay)
else:
# Re-raise the exception if the response code is not one we retry on
raise e
else:
# Re-raise the exception if it is not an instance of base_class
raise e
return f(*args, **kwargs)
return retry_func # true decorator
return deco
@classmethod
def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
"""
Retry calling the Cloud decorated function using an exponential backoff.
Kwargs:
retries (int): Number of times to retry a failed request before giving up
default=10
delay (int or float): Initial delay between retries in seconds
default=3
backoff (int or float): backoff multiplier e.g. value of 2 will
double the delay each retry
default=2
max_delay (int or None): maximum amount of time to wait between retries.
default=60
"""
return cls._backoff(_exponential_backoff(
retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
@classmethod
def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
"""
Retry calling the Cloud decorated function using a jittered backoff
strategy. More on this strategy here:
https://www.awsarchitectureblog.com/2015/03/backoff.html
Kwargs:
retries (int): Number of times to retry a failed request before giving up
default=10
delay (int): Initial delay between retries in seconds
default=3
max_delay (int): maximum amount of time to wait between retries.
default=60
"""
return cls._backoff(_full_jitter_backoff(
retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
@classmethod
def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
"""
Retry calling the Cloud decorated function using an exponential backoff.
Compatibility for the original implementation of CloudRetry.backoff that
did not provide configurable backoff strategies. Developers should use
CloudRetry.exponential_backoff instead.
Kwargs:
tries (int): Number of times to try (not retry) before giving up
default=10
delay (int or float): Initial delay between retries in seconds
default=3
backoff (int or float): backoff multiplier e.g. value of 2 will
double the delay each retry
default=1.1
"""
return cls.exponential_backoff(
retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
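# Illustrative sketch, not part of the original file: subclassing CloudRetry for
# a hypothetical provider whose SDK raises IOError-based exceptions carrying a
# 'code' attribute. The exception type and retryable codes are assumptions.
class _ExampleRetry(CloudRetry):
    base_class = IOError  # the provider SDK's base exception class

    @staticmethod
    def status_code_from_exception(error):
        # Map the SDK exception to the status code CloudRetry matches against.
        return getattr(error, 'code', None)

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        retryable = ['429', '503']
        if catch_extra_error_codes:
            retryable.extend(catch_extra_error_codes)
        return str(response_code) in retryable

# Usage: decorate the flaky call, e.g. @_ExampleRetry.jittered_backoff(retries=5, delay=2)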

@ -1,132 +0,0 @@
# -*- coding: utf-8 -*-
#
# (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
API_URL = 'https://api.cloudscale.ch/v1/'
def cloudscale_argument_spec():
return dict(
api_token=dict(fallback=(env_fallback, ['CLOUDSCALE_API_TOKEN']),
no_log=True,
required=True,
type='str'),
api_timeout=dict(default=30, type='int'),
)
class AnsibleCloudscaleBase(object):
def __init__(self, module):
self._module = module
self._auth_header = {'Authorization': 'Bearer %s' % module.params['api_token']}
self._result = {
'changed': False,
'diff': dict(before=dict(), after=dict()),
}
def _get(self, api_call):
resp, info = fetch_url(self._module, API_URL + api_call,
headers=self._auth_header,
timeout=self._module.params['api_timeout'])
if info['status'] == 200:
return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
elif info['status'] == 404:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with GET for '
'"%s".' % api_call, fetch_url_info=info)
def _post_or_patch(self, api_call, method, data):
# This helps with tags when we have the full API resource href to update.
if API_URL not in api_call:
api_endpoint = API_URL + api_call
else:
api_endpoint = api_call
headers = self._auth_header.copy()
if data is not None:
# Sanitize data dictionary
# Deepcopy: Duplicate the data object for iteration, because
# mutating a dict while iterating over it is unsafe
for k, v in deepcopy(data).items():
if v is None:
del data[k]
data = self._module.jsonify(data)
headers['Content-type'] = 'application/json'
resp, info = fetch_url(self._module,
api_endpoint,
headers=headers,
method=method,
data=data,
timeout=self._module.params['api_timeout'])
if info['status'] in (200, 201):
return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
elif info['status'] == 204:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with %s for '
'"%s".' % (method, api_call), fetch_url_info=info)
def _post(self, api_call, data=None):
return self._post_or_patch(api_call, 'POST', data)
def _patch(self, api_call, data=None):
return self._post_or_patch(api_call, 'PATCH', data)
def _delete(self, api_call):
resp, info = fetch_url(self._module,
API_URL + api_call,
headers=self._auth_header,
method='DELETE',
timeout=self._module.params['api_timeout'])
if info['status'] == 204:
return None
else:
self._module.fail_json(msg='Failure while calling the cloudscale.ch API with DELETE for '
'"%s".' % api_call, fetch_url_info=info)
def _param_updated(self, key, resource):
param = self._module.params.get(key)
if param is None:
return False
if resource and key in resource:
if param != resource[key]:
self._result['changed'] = True
patch_data = {
key: param
}
self._result['diff']['before'].update({key: resource[key]})
self._result['diff']['after'].update(patch_data)
if not self._module.check_mode:
href = resource.get('href')
if not href:
self._module.fail_json(msg='Unable to update %s, no href found.' % key)
self._patch(href, patch_data)
return True
return False
def get_result(self, resource):
if resource:
for k, v in resource.items():
self._result[k] = v
return self._result
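# Illustrative sketch, not part of the original file: a read-only module built on
# AnsibleCloudscaleBase. The 'servers' endpoint path follows the cloudscale.ch v1
# API; the 'uuid' parameter is an assumption.
def _example_cloudscale_module():
    from ansible.module_utils.basic import AnsibleModule
    argument_spec = cloudscale_argument_spec()
    argument_spec.update(dict(uuid=dict(type='str', required=True)))
    module = AnsibleModule(argument_spec=argument_spec)
    api = AnsibleCloudscaleBase(module)
    server = api._get('servers/%s' % module.params['uuid'])  # None on 404
    module.exit_json(**api.get_result(server))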

@ -1,664 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2015, René Moser <mail@renemoser.net>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import sys
import time
import traceback
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.basic import missing_required_lib
CS_IMP_ERR = None
try:
from cs import CloudStack, CloudStackException, read_config
HAS_LIB_CS = True
except ImportError:
CS_IMP_ERR = traceback.format_exc()
HAS_LIB_CS = False
if sys.version_info > (3,):
long = int
def cs_argument_spec():
return dict(
api_key=dict(default=os.environ.get('CLOUDSTACK_KEY')),
api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True),
api_url=dict(default=os.environ.get('CLOUDSTACK_ENDPOINT')),
api_http_method=dict(choices=['get', 'post'], default=os.environ.get('CLOUDSTACK_METHOD')),
api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT')),
api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'),
)
def cs_required_together():
return [['api_key', 'api_secret']]
class AnsibleCloudStack:
def __init__(self, module):
if not HAS_LIB_CS:
module.fail_json(msg=missing_required_lib('cs'), exception=CS_IMP_ERR)
self.result = {
'changed': False,
'diff': {
'before': dict(),
'after': dict()
}
}
# Common returns, will be merged with self.returns
# search_for_key: replace_with_key
self.common_returns = {
'id': 'id',
'name': 'name',
'created': 'created',
'zonename': 'zone',
'state': 'state',
'project': 'project',
'account': 'account',
'domain': 'domain',
'displaytext': 'display_text',
'displayname': 'display_name',
'description': 'description',
}
# Init returns dict for use in subclasses
self.returns = {}
# these values will be casted to int
self.returns_to_int = {}
# these keys will be compared case sensitive in self.has_changed()
self.case_sensitive_keys = [
'id',
'displaytext',
'displayname',
'description',
]
self.module = module
self._cs = None
# Helper for VPCs
self._vpc_networks_ids = None
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.network = None
self.physical_network = None
self.vpc = None
self.zone = None
self.vm = None
self.vm_default_nic = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
self.network_acl = None
@property
def cs(self):
if self._cs is None:
api_config = self.get_api_config()
self._cs = CloudStack(**api_config)
return self._cs
def get_api_config(self):
api_region = self.module.params.get('api_region') or os.environ.get('CLOUDSTACK_REGION')
try:
config = read_config(api_region)
except KeyError:
config = {}
api_config = {
'endpoint': self.module.params.get('api_url') or config.get('endpoint'),
'key': self.module.params.get('api_key') or config.get('key'),
'secret': self.module.params.get('api_secret') or config.get('secret'),
'timeout': self.module.params.get('api_timeout') or config.get('timeout') or 10,
'method': self.module.params.get('api_http_method') or config.get('method') or 'get',
}
self.result.update({
'api_region': api_region,
'api_url': api_config['endpoint'],
'api_key': api_config['key'],
'api_timeout': int(api_config['timeout']),
'api_http_method': api_config['method'],
})
if not all([api_config['endpoint'], api_config['key'], api_config['secret']]):
self.fail_json(msg="Missing api credentials: can not authenticate")
return api_config
def fail_json(self, **kwargs):
self.result.update(kwargs)
self.module.fail_json(**self.result)
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
def has_changed(self, want_dict, current_dict, only_keys=None, skip_diff_for_keys=None):
result = False
for key, value in want_dict.items():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue
# Skip None values
if value is None:
continue
if key in current_dict:
if isinstance(value, (int, float, long, complex)):
# ensure we compare the same type
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, float):
current_dict[key] = float(current_dict[key])
elif isinstance(value, long):
current_dict[key] = long(current_dict[key])
elif isinstance(value, complex):
current_dict[key] = complex(current_dict[key])
if value != current_dict[key]:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = current_dict[key]
self.result['diff']['after'][key] = value
result = True
else:
before_value = to_text(current_dict[key])
after_value = to_text(value)
if self.case_sensitive_keys and key in self.case_sensitive_keys:
if before_value != after_value:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = before_value
self.result['diff']['after'][key] = after_value
result = True
# Test for diff in case insensitive way
elif before_value.lower() != after_value.lower():
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = before_value
self.result['diff']['after'][key] = after_value
result = True
else:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = None
self.result['diff']['after'][key] = to_text(value)
result = True
return result
def _get_by_key(self, key=None, my_dict=None):
if my_dict is None:
my_dict = {}
if key:
if key in my_dict:
return my_dict[key]
self.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def query_api(self, command, **args):
try:
res = getattr(self.cs, command)(**args)
if 'errortext' in res:
self.fail_json(msg="Failed: '%s'" % res['errortext'])
except CloudStackException as e:
self.fail_json(msg='CloudStackException: %s' % to_native(e))
except Exception as e:
self.fail_json(msg=to_native(e))
return res
def get_network_acl(self, key=None):
if self.network_acl is None:
args = {
'name': self.module.params.get('network_acl'),
'vpcid': self.get_vpc(key='id'),
}
network_acls = self.query_api('listNetworkACLLists', **args)
if network_acls:
self.network_acl = network_acls['networkacllist'][0]
self.result['network_acl'] = self.network_acl['name']
if self.network_acl:
return self._get_by_key(key, self.network_acl)
else:
self.fail_json(msg="Network ACL %s not found" % self.module.params.get('network_acl'))
def get_vpc(self, key=None):
"""Return a VPC dictionary or the value of given key of."""
if self.vpc:
return self._get_by_key(key, self.vpc)
vpc = self.module.params.get('vpc')
if not vpc:
vpc = os.environ.get('CLOUDSTACK_VPC')
if not vpc:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.query_api('listVPCs', **args)
if not vpcs:
self.fail_json(msg="No VPCs available.")
for v in vpcs['vpc']:
if vpc in [v['name'], v['displaytext'], v['id']]:
# Fail if the identifier matches more than one VPC
if self.vpc:
self.fail_json(msg="More than one VPC found with the provided identifier '%s'" % vpc)
else:
self.vpc = v
self.result['vpc'] = v['name']
if self.vpc:
return self._get_by_key(key, self.vpc)
self.fail_json(msg="VPC '%s' not found" % vpc)
def is_vpc_network(self, network_id):
"""Returns True if network is in VPC."""
# This is an efficient way to query a lot of networks at a time
if self._vpc_networks_ids is None:
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.query_api('listVPCs', **args)
self._vpc_networks_ids = []
if vpcs:
for vpc in vpcs['vpc']:
for n in vpc.get('network', []):
self._vpc_networks_ids.append(n['id'])
return network_id in self._vpc_networks_ids
def get_physical_network(self, key=None):
if self.physical_network:
return self._get_by_key(key, self.physical_network)
physical_network = self.module.params.get('physical_network')
args = {
'zoneid': self.get_zone(key='id')
}
physical_networks = self.query_api('listPhysicalNetworks', **args)
if not physical_networks:
self.fail_json(msg="No physical networks available.")
for net in physical_networks['physicalnetwork']:
if physical_network in [net['name'], net['id']]:
self.physical_network = net
self.result['physical_network'] = net['name']
return self._get_by_key(key, self.physical_network)
self.fail_json(msg="Physical Network '%s' not found" % physical_network)
def get_network(self, key=None):
"""Return a network dictionary or the value of given key of."""
if self.network:
return self._get_by_key(key, self.network)
network = self.module.params.get('network')
if not network:
vpc_name = self.get_vpc(key='name')
if vpc_name:
self.fail_json(msg="Could not find network for VPC '%s' due missing argument: network" % vpc_name)
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'vpcid': self.get_vpc(key='id')
}
networks = self.query_api('listNetworks', **args)
if not networks:
self.fail_json(msg="No networks available.")
for n in networks['network']:
# ignore any VPC network if vpc param is not given
if 'vpcid' in n and not self.get_vpc(key='id'):
continue
if network in [n['displaytext'], n['name'], n['id']]:
self.result['network'] = n['name']
self.network = n
return self._get_by_key(key, self.network)
self.fail_json(msg="Network '%s' not found" % network)
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
project = os.environ.get('CLOUDSTACK_PROJECT')
if not project:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id')
}
projects = self.query_api('listProjects', **args)
if projects:
for p in projects['project']:
if project.lower() in [p['name'].lower(), p['id']]:
self.result['project'] = p['name']
self.project = p
return self._get_by_key(key, self.project)
self.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.fail_json(msg="IP address param 'ip_address' is required")
args = {
'ipaddress': ip_address,
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'vpcid': self.get_vpc(key='id'),
}
ip_addresses = self.query_api('listPublicIpAddresses', **args)
if not ip_addresses:
self.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm_guest_ip(self):
vm_guest_ip = self.module.params.get('vm_guest_ip')
default_nic = self.get_vm_default_nic()
if not vm_guest_ip:
return default_nic['ipaddress']
for secondary_ip in default_nic['secondaryip']:
if vm_guest_ip == secondary_ip['ipaddress']:
return vm_guest_ip
self.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
def get_vm_default_nic(self):
if self.vm_default_nic:
return self.vm_default_nic
nics = self.query_api('listNics', virtualmachineid=self.get_vm(key='id'))
if nics:
for n in nics['nic']:
if n['isdefault']:
self.vm_default_nic = n
return self.vm_default_nic
self.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
def get_vm(self, key=None, filter_zone=True):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.fail_json(msg="Virtual machine param 'vm' is required")
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id') if filter_zone else None,
'fetch_list': True,
}
vms = self.query_api('listVirtualMachines', **args)
if vms:
for v in vms:
if vm.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
self.vm = v
return self._get_by_key(key, self.vm)
self.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_disk_offering(self, key=None):
disk_offering = self.module.params.get('disk_offering')
if not disk_offering:
return None
# Do not add domain filter for disk offering listing.
disk_offerings = self.query_api('listDiskOfferings')
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [d['displaytext'], d['name'], d['id']]:
return self._get_by_key(key, d)
self.fail_json(msg="Disk offering '%s' not found" % disk_offering)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
if not zone:
zone = os.environ.get('CLOUDSTACK_ZONE')
zones = self.query_api('listZones')
if not zones:
self.fail_json(msg="No zones available. Please create a zone first")
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
self.result['zone'] = self.zone['name']
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone.lower() in [z['name'].lower(), z['id']]:
self.result['zone'] = z['name']
self.zone = z
return self._get_by_key(key, self.zone)
self.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.query_api('listOsTypes')
if os_types:
for o in os_types['ostype']:
if os_type in [o['description'], o['id']]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.query_api('listHypervisors')
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
account = os.environ.get('CLOUDSTACK_ACCOUNT')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.fail_json(msg="Account must be specified with Domain")
args = {
'name': account,
'domainid': self.get_domain(key='id'),
'listall': True
}
accounts = self.query_api('listAccounts', **args)
if accounts:
self.account = accounts['account'][0]
self.result['account'] = self.account['name']
return self._get_by_key(key, self.account)
self.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
domain = os.environ.get('CLOUDSTACK_DOMAIN')
if not domain:
return None
args = {
'listall': True,
}
domains = self.query_api('listDomains', **args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [domain.lower(), "root/" + domain.lower(), "root" + domain.lower()]:
self.domain = d
self.result['domain'] = d['path']
return self._get_by_key(key, self.domain)
self.fail_json(msg="Domain '%s' not found" % domain)
def query_tags(self, resource, resource_type):
args = {
'resourceid': resource['id'],
'resourcetype': resource_type,
}
tags = self.query_api('listTags', **args)
return self.get_tags(resource=tags, key='tag')
def get_tags(self, resource=None, key='tags'):
existing_tags = []
for tag in resource.get(key) or []:
existing_tags.append({'key': tag['key'], 'value': tag['value']})
return existing_tags
def _process_tags(self, resource, resource_type, tags, operation="create"):
if tags:
self.result['changed'] = True
if not self.module.check_mode:
args = {
'resourceids': resource['id'],
'resourcetype': resource_type,
'tags': tags,
}
if operation == "create":
response = self.query_api('createTags', **args)
else:
response = self.query_api('deleteTags', **args)
self.poll_job(response)
def _tags_that_should_exist_or_be_updated(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in tags if tag not in existing_tags]
def _tags_that_should_not_exist(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in existing_tags if tag not in tags]
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
resource['tags'] = self.query_tags(resource=resource, resource_type=resource_type)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.query_api('listCapabilities')
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.query_api('queryAsyncJobResult', jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
def update_result(self, resource, result=None):
if result is None:
result = dict()
if resource:
returns = self.common_returns.copy()
returns.update(self.returns)
for search_key, return_key in returns.items():
if search_key in resource:
result[return_key] = resource[search_key]
# The API does not always return int when it should, so cast these explicitly.
for search_key, return_key in self.returns_to_int.items():
if search_key in resource:
result[return_key] = int(resource[search_key])
if 'tags' in resource:
result['tags'] = resource['tags']
return result
def get_result(self, resource):
return self.update_result(resource, self.result)
def get_result_and_facts(self, facts_name, resource):
result = self.get_result(resource)
ansible_facts = {
facts_name: result.copy()
}
for k in ['diff', 'changed']:
if k in ansible_facts[facts_name]:
del ansible_facts[facts_name][k]
result.update(ansible_facts=ansible_facts)
return result
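# Illustrative sketch, not part of the original file: wiring the argument spec and
# base class into a minimal module. The 'zone' parameter is an assumption;
# AnsibleModule comes from ansible.module_utils.basic.
def _example_cloudstack_module():
    from ansible.module_utils.basic import AnsibleModule
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(zone=dict(type='str')))
    module = AnsibleModule(argument_spec=argument_spec,
                           required_together=cs_required_together())
    acs = AnsibleCloudStack(module)
    zone_name = acs.get_zone(key='name')  # falls back to the first zone
    module.exit_json(**acs.get_result({'name': zone_name}))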

@ -1,142 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class SQLParseError(Exception):
pass
class UnclosedQuoteError(SQLParseError):
pass
# maps a type of identifier to the maximum number of dot levels that are
# allowed to specify that identifier. For example, a database column can be
# specified by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
database=1,
schema=2,
table=3,
column=4,
role=1,
tablespace=1,
sequence=3,
publication=1,
)
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
def _find_end_quote(identifier, quote_char):
accumulate = 0
while True:
try:
quote = identifier.index(quote_char)
except ValueError:
raise UnclosedQuoteError
accumulate = accumulate + quote
try:
next_char = identifier[quote + 1]
except IndexError:
return accumulate
if next_char == quote_char:
try:
identifier = identifier[quote + 2:]
accumulate = accumulate + 2
except IndexError:
raise UnclosedQuoteError
else:
return accumulate
def _identifier_parse(identifier, quote_char):
if not identifier:
raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
already_quoted = False
if identifier.startswith(quote_char):
already_quoted = True
try:
end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
except UnclosedQuoteError:
already_quoted = False
else:
if end_quote < len(identifier) - 1:
if identifier[end_quote + 1] == '.':
dot = end_quote + 1
first_identifier = identifier[:dot]
next_identifier = identifier[dot + 1:]
further_identifiers = _identifier_parse(next_identifier, quote_char)
further_identifiers.insert(0, first_identifier)
else:
raise SQLParseError('User escaped identifiers must escape extra quotes')
else:
further_identifiers = [identifier]
if not already_quoted:
try:
dot = identifier.index('.')
except ValueError:
identifier = identifier.replace(quote_char, quote_char * 2)
identifier = ''.join((quote_char, identifier, quote_char))
further_identifiers = [identifier]
else:
if dot == 0 or dot >= len(identifier) - 1:
identifier = identifier.replace(quote_char, quote_char * 2)
identifier = ''.join((quote_char, identifier, quote_char))
further_identifiers = [identifier]
else:
first_identifier = identifier[:dot]
next_identifier = identifier[dot + 1:]
further_identifiers = _identifier_parse(next_identifier, quote_char)
first_identifier = first_identifier.replace(quote_char, quote_char * 2)
first_identifier = ''.join((quote_char, first_identifier, quote_char))
further_identifiers.insert(0, first_identifier)
return further_identifiers
def pg_quote_identifier(identifier, id_type):
identifier_fragments = _identifier_parse(identifier, quote_char='"')
if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
return '.'.join(identifier_fragments)
def mysql_quote_identifier(identifier, id_type):
identifier_fragments = _identifier_parse(identifier, quote_char='`')
if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
special_cased_fragments = []
for fragment in identifier_fragments:
if fragment == '`*`':
special_cased_fragments.append('*')
else:
special_cased_fragments.append(fragment)
return '.'.join(special_cased_fragments)
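# Behavioural sketch of the quoting helpers above; the results follow from the
# parser's rules for dots, embedded quotes, and per-type dot levels:
#   pg_quote_identifier('public.users', 'table')   -> '"public"."users"'
#   pg_quote_identifier('we"ird', 'role')          -> '"we""ird"'
#   mysql_quote_identifier('mydb.*', 'table')      -> '`mydb`.*'
#   pg_quote_identifier('a.b.c.d.e', 'column')     raises SQLParseError (too many dots)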

@ -1,147 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Ansible Project 2017
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(to_text(self.info["body"]))
return None
try:
return json.loads(to_text(self.body))
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
class DigitalOceanHelper:
def __init__(self, module):
self.module = module
self.baseurl = 'https://api.digitalocean.com/v2'
self.timeout = module.params.get('timeout', 30)
self.oauth_token = module.params.get('oauth_token')
self.headers = {'Authorization': 'Bearer {0}'.format(self.oauth_token),
'Content-type': 'application/json'}
# Check if api_token is valid or not
response = self.get('account')
if response.status_code == 401:
self.module.fail_json(msg='Failed to log in using the API token; please verify that the API token is valid.')
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.baseurl, path)
def send(self, method, path, data=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=self.timeout)
return Response(resp, info)
def get(self, path, data=None):
return self.send('GET', path, data)
def put(self, path, data=None):
return self.send('PUT', path, data)
def post(self, path, data=None):
return self.send('POST', path, data)
def delete(self, path, data=None):
return self.send('DELETE', path, data)
@staticmethod
def digital_ocean_argument_spec():
return dict(
validate_certs=dict(type='bool', required=False, default=True),
oauth_token=dict(
no_log=True,
# Support environment variable for DigitalOcean OAuth Token
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN', 'OAUTH_TOKEN']),
required=False,
aliases=['api_token'],
),
timeout=dict(type='int', default=30),
)
def get_paginated_data(self, base_url=None, data_key_name=None, data_per_page=40, expected_status_code=200):
"""
Function to get all paginated data from given URL
Args:
base_url: Base URL to get data from
data_key_name: Name of data key value
data_per_page: Number of results per page (Default: 40)
expected_status_code: Expected returned code from DigitalOcean (Default: 200)
Returns: List of data
"""
page = 1
has_next = True
ret_data = []
status_code = None
response = None
while has_next or status_code != expected_status_code:
required_url = "{0}page={1}&per_page={2}".format(base_url, page, data_per_page)
response = self.get(required_url)
status_code = response.status_code
# stop if any error during pagination
if status_code != expected_status_code:
break
page += 1
ret_data.extend(response.json[data_key_name])
has_next = "pages" in response.json["links"] and "next" in response.json["links"]["pages"]
if status_code != expected_status_code:
msg = "Failed to fetch %s from %s" % (data_key_name, base_url)
if response:
msg += " due to error : %s" % response.json['message']
self.module.fail_json(msg=msg)
return ret_data
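# Illustrative sketch, not part of the original file: listing droplets with the
# helper. The 'droplets' endpoint and key name follow the DigitalOcean v2 API;
# note that get_paginated_data() expects base_url to end in '?' or '&'.
# AnsibleModule comes from ansible.module_utils.basic.
def _example_do_module():
    from ansible.module_utils.basic import AnsibleModule
    module = AnsibleModule(argument_spec=DigitalOceanHelper.digital_ocean_argument_spec())
    rest = DigitalOceanHelper(module)  # validates the token against /account
    droplets = rest.get_paginated_data(base_url='droplets?', data_key_name='droplets')
    module.exit_json(changed=False, droplets=droplets)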

@ -1,338 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Dimension Data
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
# - Mark Maglana <mmaglana@gmail.com>
# - Adam Friedman <tintoy@tintoy.io>
#
# Common functionality to be used by various module components
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six.moves import configparser
from os.path import expanduser
from uuid import UUID
LIBCLOUD_IMP_ERR = None
try:
from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus
from libcloud.compute.base import Node, NodeLocation
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
import libcloud.security
HAS_LIBCLOUD = True
except ImportError:
LIBCLOUD_IMP_ERR = traceback.format_exc()
HAS_LIBCLOUD = False
# MCP 2.x version pattern for location (datacenter) names.
#
# Note that this is not a totally reliable way of determining MCP version.
# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version
# by specifying it in the module parameters.
MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*")
class DimensionDataModule(object):
"""
The base class containing common functionality used by Dimension Data modules for Ansible.
"""
def __init__(self, module):
"""
Create a new DimensionDataModule.
Will fail if Apache libcloud is not present.
:param module: The underlying Ansible module.
:type module: AnsibleModule
"""
self.module = module
if not HAS_LIBCLOUD:
self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR)
# Credentials are common to all Dimension Data modules.
credentials = self.get_credentials()
self.user_id = credentials['user_id']
self.key = credentials['key']
# Region and location are common to all Dimension Data modules.
region = self.module.params['region']
self.region = 'dd-{0}'.format(region)
self.location = self.module.params['location']
libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs']
self.driver = get_driver(Provider.DIMENSIONDATA)(
self.user_id,
self.key,
region=self.region
)
# Determine the MCP API version (this depends on the target datacenter).
self.mcp_version = self.get_mcp_version(self.location)
# Optional "wait-for-completion" arguments
if 'wait' in self.module.params:
self.wait = self.module.params['wait']
self.wait_time = self.module.params['wait_time']
self.wait_poll_interval = self.module.params['wait_poll_interval']
else:
self.wait = False
self.wait_time = 0
self.wait_poll_interval = 0
def get_credentials(self):
"""
Get user_id and key from module configuration, environment, or dotfile.
Order of priority is module, environment, dotfile.
To set in environment:
export MCP_USER='myusername'
export MCP_PASSWORD='mypassword'
To set in a dotfile, place a file at ~/.dimensiondata with
the following contents:
[dimensiondatacloud]
MCP_USER: myusername
MCP_PASSWORD: mypassword
"""
if not HAS_LIBCLOUD:
self.module.fail_json(msg='libcloud is required for this module.')
user_id = None
key = None
# First, try the module configuration
if 'mcp_user' in self.module.params:
if 'mcp_password' not in self.module.params:
self.module.fail_json(
msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
)
user_id = self.module.params['mcp_user']
key = self.module.params['mcp_password']
# Fall back to environment
if not user_id or not key:
user_id = os.environ.get('MCP_USER', None)
key = os.environ.get('MCP_PASSWORD', None)
# Finally, try dotfile (~/.dimensiondata)
if not user_id or not key:
home = expanduser('~')
config = configparser.RawConfigParser()
config.read("%s/.dimensiondata" % home)
try:
user_id = config.get("dimensiondatacloud", "MCP_USER")
key = config.get("dimensiondatacloud", "MCP_PASSWORD")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
# One or more credentials not found. Function can't recover from this
# so it has to raise an error instead of failing silently.
if not user_id:
raise MissingCredentialsError("Dimension Data user id not found")
elif not key:
raise MissingCredentialsError("Dimension Data key not found")
# Both found, return data
return dict(user_id=user_id, key=key)
def get_mcp_version(self, location):
"""
Get the MCP version for the specified location.
"""
location = self.driver.ex_get_location_by_id(location)
if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
return '2.0'
return '1.0'
def get_network_domain(self, locator, location):
"""
Retrieve a network domain by its name or Id.
"""
if is_uuid(locator):
network_domain = self.driver.ex_get_network_domain(locator)
else:
matching_network_domains = [
network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
if network_domain.name == locator
]
if matching_network_domains:
network_domain = matching_network_domains[0]
else:
network_domain = None
if network_domain:
return network_domain
raise UnknownNetworkError("Network '%s' could not be found" % locator)
def get_vlan(self, locator, location, network_domain):
"""
Get a VLAN object by its name or id
"""
if is_uuid(locator):
vlan = self.driver.ex_get_vlan(locator)
else:
matching_vlans = [
vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
if vlan.name == locator
]
if matching_vlans:
vlan = matching_vlans[0]
else:
vlan = None
if vlan:
return vlan
raise UnknownVLANError("VLAN '%s' could not be found" % locator)
@staticmethod
def argument_spec(**additional_argument_spec):
"""
Build an argument specification for a Dimension Data module.
:param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
:return: A dict containing the argument specification.
"""
spec = dict(
region=dict(type='str', default='na'),
mcp_user=dict(type='str', required=False),
mcp_password=dict(type='str', required=False, no_log=True),
location=dict(type='str', required=True),
validate_certs=dict(type='bool', required=False, default=True)
)
if additional_argument_spec:
spec.update(additional_argument_spec)
return spec
@staticmethod
def argument_spec_with_wait(**additional_argument_spec):
"""
Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.
:param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
:return: A dict containing the argument specification.
"""
spec = DimensionDataModule.argument_spec(
wait=dict(type='bool', required=False, default=False),
wait_time=dict(type='int', required=False, default=600),
wait_poll_interval=dict(type='int', required=False, default=2)
)
if additional_argument_spec:
spec.update(additional_argument_spec)
return spec
@staticmethod
def required_together(*additional_required_together):
"""
Get the basic argument specification for Dimension Data modules, indicating which arguments must be specified together.
:param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together.
:return: An array containing the argument specifications.
"""
required_together = [
['mcp_user', 'mcp_password']
]
if additional_required_together:
required_together.extend(additional_required_together)
return required_together
class LibcloudNotFound(Exception):
"""
Exception raised when Apache libcloud cannot be found.
"""
pass
class MissingCredentialsError(Exception):
"""
Exception raised when credentials for Dimension Data CloudControl cannot be found.
"""
pass
class UnknownNetworkError(Exception):
"""
Exception raised when a network or network domain cannot be found.
"""
pass
class UnknownVLANError(Exception):
"""
Exception raised when a VLAN cannot be found.
"""
pass
def get_dd_regions():
"""
Get the list of available regions whose vendor is Dimension Data.
"""
# Get endpoints
all_regions = API_ENDPOINTS.keys()
# Only Dimension Data endpoints (no prefix)
regions = [region[3:] for region in all_regions if region.startswith('dd-')]
return regions
def is_uuid(u, version=4):
"""
Test if valid v4 UUID
"""
try:
uuid_obj = UUID(u, version=version)
return str(uuid_obj) == u
except ValueError:
return False
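# Illustrative sketch, not part of the original file: a module resolving a network
# domain by name or UUID. The 'network_domain' parameter is an assumption;
# AnsibleModule is already imported above.
def _example_dimensiondata_module():
    module = AnsibleModule(
        argument_spec=DimensionDataModule.argument_spec_with_wait(
            network_domain=dict(type='str', required=True)
        ),
        required_together=DimensionDataModule.required_together()
    )
    dd = DimensionDataModule(module)
    network_domain = dd.get_network_domain(module.params['network_domain'], dd.location)
    module.exit_json(changed=False, network_domain=network_domain.name)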

File diff suppressed because it is too large

@ -1,280 +0,0 @@
# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
# (c) Thierry Bouvet (@tbouvet)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from time import sleep
try:
from docker.errors import APIError, NotFound
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils._text import to_native
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
LooseVersion,
)
class AnsibleDockerSwarmClient(AnsibleDockerClient):
def __init__(self, **kwargs):
super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
def get_swarm_node_id(self):
"""
Get the 'NodeID' of the Swarm node, or 'None' if the host is not in a Swarm. It returns the NodeID
of the Docker host the module is executed on
:return:
NodeID of host or 'None' if not part of Swarm
"""
try:
info = self.info()
except APIError as exc:
self.fail("Failed to get node information for %s" % to_native(exc))
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return swarm_info['Swarm']['NodeID']
return None
def check_if_swarm_node(self, node_id=None):
"""
Checks if the host is part of a Docker Swarm. If 'node_id' is not provided, it reads the Docker host
system information and checks whether the Swarm-specific keys exist in the output. If 'node_id' is provided,
it tries to read the node information, assuming it runs on a Swarm manager. The get_node_inspect() method
handles the exception raised when it is not executed on a Swarm manager
:param node_id: Node identifier
:return:
bool: True if node is part of Swarm, False otherwise
"""
if node_id is None:
try:
info = self.info()
except APIError:
self.fail("Failed to get host information.")
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return True
if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
return True
return False
else:
try:
node_info = self.get_node_inspect(node_id=node_id)
except APIError:
return
if node_info['ID'] is not None:
return True
return False
def check_if_swarm_manager(self):
"""
Checks if the node role is set as Manager in the Swarm. The node is the docker host on which the module action
is performed. The inspect_swarm() call will fail if the node is not a manager
:return: True if node is Swarm Manager, False otherwise
"""
try:
self.inspect_swarm()
return True
except APIError:
return False
def fail_task_if_not_swarm_manager(self):
"""
If host is not a swarm manager then Ansible task on this host should end with 'failed' state
"""
if not self.check_if_swarm_manager():
self.fail("Error running docker swarm module: must run on swarm manager node")
def check_if_swarm_worker(self):
"""
Checks if the node role is set as Worker in the Swarm. The node is the docker host on which the module action
is performed. Will fail, via check_if_swarm_node(), if run on a host that is not part of a Swarm
:return: True if node is Swarm Worker, False otherwise
"""
if self.check_if_swarm_node() and not self.check_if_swarm_manager():
return True
return False
def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
"""
Checks if the node status on the Swarm manager is 'down'. If node_id is provided, it queries the manager about
the node specified in the parameter; otherwise it queries the manager about itself. If run on a Swarm worker node or
a host that is not part of a Swarm, it will fail the playbook
:param repeat_check: number of check attempts with a 5-second delay between them; by default, checks only once
:param node_id: node ID or name; if None, the method will try to get the node_id of the host the module runs on
:return:
True if node is part of swarm but its state is down, False otherwise
"""
if repeat_check < 1:
repeat_check = 1
if node_id is None:
node_id = self.get_swarm_node_id()
for retry in range(0, repeat_check):
if retry > 0:
sleep(5)
node_info = self.get_node_inspect(node_id=node_id)
if node_info['Status']['State'] == 'down':
return True
return False
def get_node_inspect(self, node_id=None, skip_missing=False):
"""
Returns Swarm node info for a single node, as in the 'docker node inspect' command
:param skip_missing: if True, the function will return None instead of failing the task
:param node_id: node ID or name; if None, the method will try to get the node_id of the host the module runs on
:return:
Single node information structure
"""
if node_id is None:
node_id = self.get_swarm_node_id()
if node_id is None:
self.fail("Failed to get node information.")
try:
node_info = self.inspect_node(node_id=node_id)
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
if exc.status_code == 404:
if skip_missing:
return None
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
if 'ManagerStatus' in node_info:
if node_info['ManagerStatus'].get('Leader'):
# This is a workaround for a Docker bug where in some cases the Leader IP is 0.0.0.0
# See moby/moby#35437 for details
count_colons = node_info['ManagerStatus']['Addr'].count(":")
if count_colons == 1:
swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
else:
swarm_leader_ip = node_info['Status']['Addr']
node_info['Status']['Addr'] = swarm_leader_ip
return node_info
def get_all_nodes_inspect(self):
"""
Returns Swarm node info about all registered nodes, as in the 'docker node inspect' command
:return:
Structure with information about all nodes
"""
try:
node_info = self.nodes()
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
return node_info
def get_all_nodes_list(self, output='short'):
"""
Returns list of nodes registered in Swarm
:param output: Defines format of returned data
:return:
If 'output' is 'short', the returned data is a list of the hostnames of nodes registered in Swarm;
if 'output' is 'long', the returned data is a list of dicts containing the attributes as in the
output of the 'docker node ls' command
"""
nodes_list = []
nodes_inspect = self.get_all_nodes_inspect()
if nodes_inspect is None:
return None
if output == 'short':
for node in nodes_inspect:
nodes_list.append(node['Description']['Hostname'])
elif output == 'long':
for node in nodes_inspect:
node_property = {}
node_property.update({'ID': node['ID']})
node_property.update({'Hostname': node['Description']['Hostname']})
node_property.update({'Status': node['Status']['State']})
node_property.update({'Availability': node['Spec']['Availability']})
if 'ManagerStatus' in node:
if node['ManagerStatus']['Leader'] is True:
node_property.update({'Leader': True})
node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
nodes_list.append(node_property)
else:
return None
return nodes_list
def get_node_name_by_id(self, nodeid):
return self.get_node_inspect(nodeid)['Description']['Hostname']
def get_unlock_key(self):
if self.docker_py_version < LooseVersion('2.7.0'):
return None
return super(AnsibleDockerSwarmClient, self).get_unlock_key()
def get_service_inspect(self, service_id, skip_missing=False):
"""
Returns Swarm service info about a single service, as in the 'docker service inspect' command
:param service_id: service ID or name
:param skip_missing: if True then function will return None instead of failing the task
:return:
Single service information structure
"""
try:
service_info = self.inspect_service(service_id)
except NotFound as exc:
if skip_missing is False:
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
else:
return None
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
self.fail("Error inspecting swarm service: %s" % exc)
except Exception as exc:
self.fail("Error inspecting swarm service: %s" % exc)
json_str = json.dumps(service_info, ensure_ascii=False)
service_info = json.loads(json_str)
return service_info
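A hedged usage sketch for the Swarm helpers above; the client construction follows the pattern used by the docker_swarm_* modules, but the argument details here are assumptions, not the modules' actual code:
# Illustrative only: the constructor is defined earlier in this file and
# builds its own AnsibleModule internally.
client = AnsibleDockerSwarmClient(argument_spec=dict(), supports_check_mode=True)
# Fail early unless we are on a manager, then report the registered nodes.
client.fail_task_if_not_swarm_manager()
nodes = client.get_all_nodes_list(output='long')
client.module.exit_json(changed=False, nodes=nodes)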

@ -1,139 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2016, René Moser <mail@renemoser.net>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six import integer_types, string_types
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
EXO_DNS_BASEURL = "https://api.exoscale.ch/dns/v1"
def exo_dns_argument_spec():
return dict(
api_key=dict(default=os.environ.get('CLOUDSTACK_KEY'), no_log=True),
api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True),
api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT') or 10),
api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'),
validate_certs=dict(default=True, type='bool'),
)
def exo_dns_required_together():
return [['api_key', 'api_secret']]
class ExoDns(object):
def __init__(self, module):
self.module = module
self.api_key = self.module.params.get('api_key')
self.api_secret = self.module.params.get('api_secret')
if not (self.api_key and self.api_secret):
try:
region = self.module.params.get('api_region')
config = self.read_config(ini_group=region)
self.api_key = config['key']
self.api_secret = config['secret']
except Exception as e:
self.module.fail_json(msg="Error while processing config: %s" % to_native(e))
self.headers = {
'X-DNS-Token': "%s:%s" % (self.api_key, self.api_secret),
'Content-Type': 'application/json',
'Accept': 'application/json',
}
self.result = {
'changed': False,
'diff': {
'before': {},
'after': {},
}
}
def read_config(self, ini_group=None):
if not ini_group:
ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack')
keys = ['key', 'secret']
env_conf = {}
for key in keys:
if 'CLOUDSTACK_%s' % key.upper() not in os.environ:
break
else:
env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()]
else:
return env_conf
# Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini
# Last read wins in configparser
paths = (
os.path.join(os.path.expanduser('~'), '.cloudstack.ini'),
os.path.join(os.getcwd(), 'cloudstack.ini'),
)
# Look at CLOUDSTACK_CONFIG first if present
if 'CLOUDSTACK_CONFIG' in os.environ:
paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),)
if not any([os.path.exists(c) for c in paths]):
self.module.fail_json(msg="Config file not found. Tried : %s" % ", ".join(paths))
conf = configparser.ConfigParser()
conf.read(paths)
return dict(conf.items(ini_group))
def api_query(self, resource="/domains", method="GET", data=None):
url = EXO_DNS_BASEURL + resource
if data:
data = self.module.jsonify(data)
response, info = fetch_url(
module=self.module,
url=url,
data=data,
method=method,
headers=self.headers,
timeout=self.module.params.get('api_timeout'),
)
if info['status'] not in (200, 201, 204):
self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
try:
return self.module.from_json(to_text(response.read()))
except Exception as e:
self.module.fail_json(msg="Could not process response into json: %s" % to_native(e))
def has_changed(self, want_dict, current_dict, only_keys=None):
changed = False
for key, value in want_dict.items():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue
# Skip None values
if value is None:
continue
if key in current_dict:
if isinstance(current_dict[key], integer_types):
if value != current_dict[key]:
self.result['diff']['before'][key] = current_dict[key]
self.result['diff']['after'][key] = value
changed = True
elif isinstance(current_dict[key], string_types):
if value.lower() != current_dict[key].lower():
self.result['diff']['before'][key] = current_dict[key]
self.result['diff']['after'][key] = value
changed = True
else:
self.module.fail_json(msg="Unable to determine comparison for key %s" % key)
else:
self.result['diff']['after'][key] = value
changed = True
return changed
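A brief, hypothetical sketch of how a module built on ExoDns typically wires things together; the shape of the '/domains' payload is an assumption about the Exoscale DNS API, not taken from this file:
# Illustrative only: construct the helper, query the API, and report a diff.
from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(
    argument_spec=exo_dns_argument_spec(),
    required_together=exo_dns_required_together(),
    supports_check_mode=True,
)
exo_dns = ExoDns(module)
domains = exo_dns.api_query("/domains", "GET")  # GET is the default method
want = {'name': 'example.com'}  # hypothetical desired attributes
exo_dns.result['changed'] = exo_dns.has_changed(want, domains[0]['domain'])
module.exit_json(**exo_dns.result)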

@ -1,383 +0,0 @@
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Legacy
try:
import bigsuds
bigsuds_found = True
except ImportError:
bigsuds_found = False
from ansible.module_utils.basic import env_fallback
def f5_argument_spec():
return dict(
server=dict(
type='str',
required=True,
fallback=(env_fallback, ['F5_SERVER'])
),
user=dict(
type='str',
required=True,
fallback=(env_fallback, ['F5_USER'])
),
password=dict(
type='str',
aliases=['pass', 'pwd'],
required=True,
no_log=True,
fallback=(env_fallback, ['F5_PASSWORD'])
),
validate_certs=dict(
default='yes',
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
server_port=dict(
type='int',
default=443,
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
partition=dict(
type='str',
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
def f5_parse_arguments(module):
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(
msg="bigsuds does not support verifying certificates with python < 2.7.9."
"Either update python or set validate_certs=False on the task'")
return (
module.params['server'],
module.params['user'],
module.params['password'],
module.params['state'],
module.params['partition'],
module.params['validate_certs'],
module.params['server_port']
)
def bigip_api(bigip, user, password, validate_certs, port=443):
try:
if bigsuds.__version__ >= '1.0.4':
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port)
elif bigsuds.__version__ == '1.0.3':
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
else:
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
except TypeError:
# bigsuds < 1.0.3, no verify param
if validate_certs:
# Note: verified we have SSLContext when we parsed params
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
else:
import ssl
if hasattr(ssl, 'SSLContext'):
# Really, you should never do this. It disables certificate
# verification *globally*. But since older bigip libraries
# don't give us a way to toggle verification we need to
# disable it at the global level.
# From https://www.python.org/dev/peps/pep-0476/#id29
ssl._create_default_https_context = ssl._create_unverified_context
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
return api
# Fully Qualified name (with the partition)
def fq_name(partition, name):
if name is not None and not name.startswith('/'):
return '/%s/%s' % (partition, name)
return name
# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
if list_names is None:
return None
return map(lambda x: fq_name(partition, x), list_names)
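# For example (illustrative, not part of the original file):
#   fq_name('Common', 'my-pool')     -> '/Common/my-pool'
#   fq_name('Common', '/Other/pool') -> '/Other/pool' (already qualified)
#   fq_list_names('Common', ['a', 'b']) yields '/Common/a', '/Common/b'
#   (as a map object on Python 3)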
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
# New style
from abc import ABCMeta, abstractproperty
from collections import defaultdict
try:
from f5.bigip import ManagementRoot as BigIpMgmt
from f5.bigip.contexts import TransactionContextManager as BigIpTxContext
from f5.bigiq import ManagementRoot as BigIqMgmt
from f5.iworkflow import ManagementRoot as iWorkflowMgmt
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems, with_metaclass
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils._text import to_text
F5_COMMON_ARGS = dict(
server=dict(
type='str',
required=True,
fallback=(env_fallback, ['F5_SERVER'])
),
user=dict(
type='str',
required=True,
fallback=(env_fallback, ['F5_USER'])
),
password=dict(
type='str',
aliases=['pass', 'pwd'],
required=True,
no_log=True,
fallback=(env_fallback, ['F5_PASSWORD'])
),
validate_certs=dict(
default='yes',
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
server_port=dict(
type='int',
default=443,
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
partition=dict(
type='str',
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
class AnsibleF5Client(object):
def __init__(self, argument_spec=None, supports_check_mode=False,
mutually_exclusive=None, required_together=None,
required_if=None, required_one_of=None, add_file_common_args=False,
f5_product_name='bigip', sans_state=False, sans_partition=False):
self.f5_product_name = f5_product_name
merged_arg_spec = dict()
merged_arg_spec.update(F5_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
if sans_state:
del merged_arg_spec['state']
if sans_partition:
del merged_arg_spec['partition']
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if,
required_one_of=required_one_of,
add_file_common_args=add_file_common_args
)
self.check_mode = self.module.check_mode
self._connect_params = self._get_connect_params()
if 'transport' not in self.module.params or self.module.params['transport'] != 'cli':
try:
self.api = self._get_mgmt_root(
f5_product_name, **self._connect_params
)
except iControlUnexpectedHTTPError as exc:
self.fail(str(exc))
def fail(self, msg):
self.module.fail_json(msg=msg)
def _get_connect_params(self):
params = dict(
user=self.module.params['user'],
password=self.module.params['password'],
server=self.module.params['server'],
server_port=self.module.params['server_port'],
validate_certs=self.module.params['validate_certs']
)
return params
def _get_mgmt_root(self, type, **kwargs):
if type == 'bigip':
return BigIpMgmt(
kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'],
token='tmos'
)
elif type == 'iworkflow':
return iWorkflowMgmt(
kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'],
token='local'
)
elif type == 'bigiq':
return BigIqMgmt(
kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'],
auth_provider='local'
)
def reconnect(self):
"""Attempts to reconnect to a device
The existing token from a ManagementRoot can become invalid if you,
for example, upgrade the device (such as is done in the *_software
module).
This method can be used to reconnect to a remote device without
having to re-instantiate the ArgumentSpec and AnsibleF5Client classes;
it will use the same values that were initially provided to those
classes.
:return:
:raises iControlUnexpectedHTTPError
"""
self.api = self._get_mgmt_root(
self.f5_product_name, **self._connect_params
)
class AnsibleF5Parameters(object):
def __init__(self, params=None):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
dict_to_use = self.api_map
map_key = self.api_map[k]
else:
dict_to_use = self._values
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def __getattr__(self, item):
# Ensures that properties that weren't defined, and therefore stashed
# in the `_values` dict, will be retrievable.
return self._values[item]
@property
def partition(self):
if self._values['partition'] is None:
return 'Common'
return self._values['partition'].strip('/')
@partition.setter
def partition(self, value):
self._values['partition'] = value
def _filter_params(self, params):
return dict((k, v) for k, v in iteritems(params) if v is not None)
class F5ModuleError(Exception):
pass
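A hedged sketch of the subclassing pattern AnsibleF5Parameters enables; the api_map entry and the property shown are hypothetical, not from a real F5 module:
# Illustrative only: a hypothetical Parameters subclass using the
# api_map hook that AnsibleF5Parameters.update() consults.
class Parameters(AnsibleF5Parameters):
    # Map a (hypothetical) camelCase F5 API key to a snake_case parameter.
    api_map = {'connectionLimit': 'connection_limit'}

    @property
    def connection_limit(self):
        if self._values['connection_limit'] is None:
            return None
        return int(self._values['connection_limit'])

    @connection_limit.setter
    def connection_limit(self, value):
        self._values['connection_limit'] = value

p = Parameters(params={'connectionLimit': '10'})
assert p.connection_limit == 10  # API key mapped and value coerced to int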

@ -1,316 +0,0 @@
# -*- coding: utf-8 -*-
#
# (c) 2013-2018, Adam Miller (maxamillion@fedoraproject.org)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Imports and info for sanity checking
from distutils.version import LooseVersion
FW_VERSION = None
fw = None
fw_offline = False
import_failure = True
try:
import firewall.config
FW_VERSION = firewall.config.VERSION
from firewall.client import FirewallClient
from firewall.client import FirewallClientZoneSettings
from firewall.errors import FirewallError
import_failure = False
try:
fw = FirewallClient()
fw.getDefaultZone()
except (AttributeError, FirewallError):
# Firewalld is not currently running, permanent-only operations
fw_offline = True
# Import other required parts of the firewalld API
#
# NOTE:
# online and offline operations do not share a common firewalld API
try:
from firewall.core.fw_test import Firewall_test
fw = Firewall_test()
except ImportError:
# In firewalld version 0.7.0 this behavior changed
from firewall.core.fw import Firewall
fw = Firewall(offline=True)
fw.start()
except ImportError:
pass
class FirewallTransaction(object):
"""
FirewallTransaction
This is the base class for all firewalld transactions we might want to have
"""
def __init__(self, module, action_args=(), zone=None, desired_state=None,
permanent=False, immediate=False, enabled_values=None, disabled_values=None):
# type: (AnsibleModule, tuple, str, str, bool, bool, list, list)
"""
Initialize the transaction
:module: AnsibleModule, instance of AnsibleModule
:action_args: tuple, args to pass for the action to take place
:zone: str, firewall zone
:desired_state: str, the desired state (enabled, disabled, etc)
:permanent: bool, action should be permanent
:immediate: bool, action should take place immediately
:enabled_values: str[], acceptable values for enabling something (default: enabled)
:disabled_values: str[], acceptable values for disabling something (default: disabled)
"""
self.module = module
self.fw = fw
self.action_args = action_args
if zone:
self.zone = zone
else:
if fw_offline:
self.zone = fw.get_default_zone()
else:
self.zone = fw.getDefaultZone()
self.desired_state = desired_state
self.permanent = permanent
self.immediate = immediate
self.fw_offline = fw_offline
self.enabled_values = enabled_values or ["enabled"]
self.disabled_values = disabled_values or ["disabled"]
# List of messages that we'll call module.fail_json or module.exit_json
# with.
self.msgs = []
# Allow for custom messages to be added for certain subclass transaction
# types
self.enabled_msg = None
self.disabled_msg = None
#####################
# exception handling
#
def action_handler(self, action_func, action_func_args):
"""
Function to wrap calls to make actions on firewalld in try/except
logic and emit (hopefully) useful error messages
"""
try:
return action_func(*action_func_args)
except Exception as e:
# If there are any commonly known errors, provide more context here
# to help the users diagnose what's wrong
if "INVALID_SERVICE" in "%s" % e:
self.msgs.append("Services are defined by port/tcp relationship and named as they are in /etc/services (on most systems)")
if len(self.msgs) > 0:
self.module.fail_json(
msg='ERROR: Exception caught: %s %s' % (e, ', '.join(self.msgs))
)
else:
self.module.fail_json(msg='ERROR: Exception caught: %s' % e)
def get_fw_zone_settings(self):
if self.fw_offline:
fw_zone = self.fw.config.get_zone(self.zone)
fw_settings = FirewallClientZoneSettings(
list(self.fw.config.get_zone_config(fw_zone))
)
else:
fw_zone = self.fw.config().getZoneByName(self.zone)
fw_settings = fw_zone.getSettings()
return (fw_zone, fw_settings)
def update_fw_settings(self, fw_zone, fw_settings):
if self.fw_offline:
self.fw.config.set_zone_config(fw_zone, fw_settings.settings)
else:
fw_zone.update(fw_settings)
def get_enabled_immediate(self):
raise NotImplementedError
def get_enabled_permanent(self):
raise NotImplementedError
def set_enabled_immediate(self):
raise NotImplementedError
def set_enabled_permanent(self):
raise NotImplementedError
def set_disabled_immediate(self):
raise NotImplementedError
def set_disabled_permanent(self):
raise NotImplementedError
def run(self):
"""
run
This function contains the "transaction logic" where as all operations
follow a similar pattern in order to perform their action but simply
call different functions to carry that action out.
"""
self.changed = False
if self.immediate and self.permanent:
is_enabled_permanent = self.action_handler(
self.get_enabled_permanent,
self.action_args
)
is_enabled_immediate = self.action_handler(
self.get_enabled_immediate,
self.action_args
)
self.msgs.append('Permanent and Non-Permanent(immediate) operation')
if self.desired_state in self.enabled_values:
if not is_enabled_permanent or not is_enabled_immediate:
if self.module.check_mode:
self.module.exit_json(changed=True)
if not is_enabled_permanent:
self.action_handler(
self.set_enabled_permanent,
self.action_args
)
self.changed = True
if not is_enabled_immediate:
self.action_handler(
self.set_enabled_immediate,
self.action_args
)
self.changed = True
if self.changed and self.enabled_msg:
self.msgs.append(self.enabled_msg)
elif self.desired_state in self.disabled_values:
if is_enabled_permanent or is_enabled_immediate:
if self.module.check_mode:
self.module.exit_json(changed=True)
if is_enabled_permanent:
self.action_handler(
self.set_disabled_permanent,
self.action_args
)
self.changed = True
if is_enabled_immediate:
self.action_handler(
self.set_disabled_immediate,
self.action_args
)
self.changed = True
if self.changed and self.disabled_msg:
self.msgs.append(self.disabled_msg)
elif self.permanent and not self.immediate:
is_enabled = self.action_handler(
self.get_enabled_permanent,
self.action_args
)
self.msgs.append('Permanent operation')
if self.desired_state in self.enabled_values:
if not is_enabled:
if self.module.check_mode:
self.module.exit_json(changed=True)
self.action_handler(
self.set_enabled_permanent,
self.action_args
)
self.changed = True
if self.changed and self.enabled_msg:
self.msgs.append(self.enabled_msg)
elif self.desired_state in self.disabled_values:
if is_enabled:
if self.module.check_mode:
self.module.exit_json(changed=True)
self.action_handler(
self.set_disabled_permanent,
self.action_args
)
self.changed = True
if self.changed and self.disabled_msg:
self.msgs.append(self.disabled_msg)
elif self.immediate and not self.permanent:
is_enabled = self.action_handler(
self.get_enabled_immediate,
self.action_args
)
self.msgs.append('Non-permanent operation')
if self.desired_state in self.enabled_values:
if not is_enabled:
if self.module.check_mode:
self.module.exit_json(changed=True)
self.action_handler(
self.set_enabled_immediate,
self.action_args
)
self.changed = True
if self.changed and self.enabled_msg:
self.msgs.append(self.enabled_msg)
elif self.desired_state in self.disabled_values:
if is_enabled:
if self.module.check_mode:
self.module.exit_json(changed=True)
self.action_handler(
self.set_disabled_immediate,
self.action_args
)
self.changed = True
if self.changed and self.disabled_msg:
self.msgs.append(self.disabled_msg)
return (self.changed, self.msgs)
@staticmethod
def sanity_check(module):
"""
Perform sanity checking, version checks, etc
:module: AnsibleModule instance
"""
if FW_VERSION and fw_offline:
# Pre-run version checking
if LooseVersion(FW_VERSION) < LooseVersion("0.3.9"):
module.fail_json(msg='unsupported version of firewalld, offline operations require >= 0.3.9 - found: {0}'.format(FW_VERSION))
elif FW_VERSION and not fw_offline:
# Pre-run version checking
if LooseVersion(FW_VERSION) < LooseVersion("0.2.11"):
module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11 - found: {0}'.format(FW_VERSION))
# Check for firewalld running
try:
if fw.connected is False:
module.fail_json(msg='firewalld service must be running, or try with offline=true')
except AttributeError:
module.fail_json(msg="firewalld connection can't be established,\
installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)
if import_failure:
module.fail_json(
msg='Python Module not found: firewalld and its python module are required for this module, \
version 0.2.11 or newer required (0.3.9 or newer for offline operations)'
)
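To illustrate the transaction pattern, here is a hedged sketch of a subclass managing a zone service; the firewalld client calls (getServices, addService, removeService) follow the online client API, and offline handling is omitted for brevity:
# Illustrative only: a minimal FirewallTransaction subclass. run() calls
# each method below via action_handler() with action_args unpacked, so
# action_args=('https', 0) maps to (service, timeout).
class ServiceTransaction(FirewallTransaction):

    def get_enabled_immediate(self, service, timeout):
        return service in self.fw.getServices(self.zone)

    def get_enabled_permanent(self, service, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        return service in fw_settings.getServices()

    def set_enabled_immediate(self, service, timeout):
        self.fw.addService(self.zone, service, timeout)

    def set_enabled_permanent(self, service, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addService(service)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, service, timeout):
        self.fw.removeService(self.zone, service)

    def set_disabled_permanent(self, service, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeService(service)
        self.update_fw_settings(fw_zone, fw_settings)

# Usage (hypothetical):
#   changed, msgs = ServiceTransaction(
#       module, action_args=('https', 0), zone='public',
#       desired_state='enabled', permanent=True, immediate=True).run()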

@ -1,55 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
try:
from libcloud.dns.types import Provider
from libcloud.dns.providers import get_driver
HAS_LIBCLOUD_BASE = True
except ImportError:
HAS_LIBCLOUD_BASE = False
from ansible.module_utils.gcp import gcp_connect
from ansible.module_utils.gcp import unexpected_error_msg as gcp_error
USER_AGENT_PRODUCT = "Ansible-gcdns"
USER_AGENT_VERSION = "v1"
def gcdns_connect(module, provider=None):
"""Return a GCP connection for Google Cloud DNS."""
if not HAS_LIBCLOUD_BASE:
module.fail_json(msg='libcloud must be installed to use this module')
provider = provider or Provider.GOOGLE
return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return gcp_error(error)
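A minimal sketch of intended use, assuming an initialized AnsibleModule and the standard libcloud DNS driver interface:
# Illustrative only.
conn = gcdns_connect(module)  # module: an initialized AnsibleModule
zones = conn.list_zones()     # standard libcloud DNS driver method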

@ -1,54 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
HAS_LIBCLOUD_BASE = True
except ImportError:
HAS_LIBCLOUD_BASE = False
from ansible.module_utils.gcp import gcp_connect
from ansible.module_utils.gcp import unexpected_error_msg as gcp_error
USER_AGENT_PRODUCT = "Ansible-gce"
USER_AGENT_VERSION = "v1"
def gce_connect(module, provider=None):
"""Return a GCP connection for Google Compute Engine."""
if not HAS_LIBCLOUD_BASE:
module.fail_json(msg='libcloud must be installed to use this module')
provider = provider or Provider.GCE
return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return gcp_error(error)

@ -1,815 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import os
import time
import traceback
from distutils.version import LooseVersion
# libcloud
try:
import libcloud
HAS_LIBCLOUD_BASE = True
except ImportError:
HAS_LIBCLOUD_BASE = False
# google-auth
try:
import google.auth
from google.oauth2 import service_account
HAS_GOOGLE_AUTH = True
except ImportError:
HAS_GOOGLE_AUTH = False
# google-python-api
try:
import google_auth_httplib2
from httplib2 import Http
from googleapiclient.http import set_user_agent
from googleapiclient.errors import HttpError
from apiclient.discovery import build
HAS_GOOGLE_API_LIB = True
except ImportError:
HAS_GOOGLE_API_LIB = False
import ansible.module_utils.six.moves.urllib.parse as urlparse
GCP_DEFAULT_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
def _get_gcp_ansible_credentials(module):
"""Helper to fetch creds from AnsibleModule object."""
service_account_email = module.params.get('service_account_email', None)
# Note: pem_file is discouraged and will be deprecated
credentials_file = module.params.get('pem_file', None) or module.params.get(
'credentials_file', None)
project_id = module.params.get('project_id', None)
return (service_account_email, credentials_file, project_id)
def _get_gcp_environ_var(var_name, default_value):
"""Wrapper around os.environ.get call."""
return os.environ.get(
var_name, default_value)
def _get_gcp_environment_credentials(service_account_email, credentials_file, project_id):
"""Helper to look in environment variables for credentials."""
# If any of the values are not given as parameters, check the appropriate
# environment variables.
if not service_account_email:
service_account_email = _get_gcp_environ_var('GCE_EMAIL', None)
if not credentials_file:
credentials_file = _get_gcp_environ_var(
'GCE_CREDENTIALS_FILE_PATH', None) or _get_gcp_environ_var(
'GOOGLE_APPLICATION_CREDENTIALS', None) or _get_gcp_environ_var(
'GCE_PEM_FILE_PATH', None)
if not project_id:
project_id = _get_gcp_environ_var('GCE_PROJECT', None) or _get_gcp_environ_var(
'GOOGLE_CLOUD_PROJECT', None)
return (service_account_email, credentials_file, project_id)
def _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False):
"""
Obtain GCP credentials by trying various methods.
There are 3 ways to specify GCP credentials:
1. Specify via Ansible module parameters (recommended).
2. Specify via environment variables. Two sets of env vars are available:
a) GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS (preferred)
b) GCE_PROJECT, GCE_CREDENTIALS_FILE_PATH, GCE_EMAIL (legacy, not recommended; req'd if
using p12 key)
3. Specify via libcloud secrets.py file (deprecated).
There are 3 helper functions to assist in the above.
Regardless of method, the user also has the option of specifying a JSON
file or a p12 file as the credentials file. JSON is strongly recommended and
p12 will be removed in the future.
Additionally, flags may be set to require valid json and check the libcloud
version.
AnsibleModule.fail_json is called only if the project_id cannot be found.
:param module: initialized Ansible module object
:type module: `class AnsibleModule`
:param require_valid_json: If true, require credentials to be valid JSON. Default is True.
:type require_valid_json: ``bool``
:param check_libcloud: If true, check the libcloud version available to see if
JSON creds are supported.
:type check_libcloud: ``bool``
:return: {'service_account_email': service_account_email,
'credentials_file': credentials_file,
'project_id': project_id}
:rtype: ``dict``
"""
(service_account_email,
credentials_file,
project_id) = _get_gcp_ansible_credentials(module)
# If any of the values are not given as parameters, check the appropriate
# environment variables.
(service_account_email,
credentials_file,
project_id) = _get_gcp_environment_credentials(service_account_email,
credentials_file, project_id)
if credentials_file is None or project_id is None or service_account_email is None:
if check_libcloud is True:
if project_id is None:
# TODO(supertom): this message is legacy and integration tests
# depend on it.
module.fail_json(msg='Missing GCE connection parameters in libcloud '
'secrets file.')
else:
if project_id is None:
module.fail_json(msg=('GCP connection error: unable to determine project (%s) or '
'credentials file (%s)' % (project_id, credentials_file)))
# Set these fields to empty strings if they are None
# consumers of this will make the distinction between an empty string
# and None.
if credentials_file is None:
credentials_file = ''
if service_account_email is None:
service_account_email = ''
# ensure the credentials file is found and is in the proper format.
if credentials_file:
_validate_credentials_file(module, credentials_file,
require_valid_json=require_valid_json,
check_libcloud=check_libcloud)
return {'service_account_email': service_account_email,
'credentials_file': credentials_file,
'project_id': project_id}
def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False):
"""
Check for valid credentials file.
Optionally check for JSON format and if libcloud supports JSON.
:param module: initialized Ansible module object
:type module: `class AnsibleModule`
:param credentials_file: path to file on disk
:type credentials_file: ``str``. Complete path to file on disk.
:param require_valid_json: This argument is ignored as of Ansible 2.7.
:type require_valid_json: ``bool``
:param check_libcloud: If true, check the libcloud version available to see if
JSON creds are supported.
:type check_libcloud: ``bool``
:returns: True
:rtype: ``bool``
"""
try:
# Try to read credentials as JSON
with open(credentials_file) as credentials:
json.loads(credentials.read())
# If the credentials are proper JSON and we do not have the minimum
# required libcloud version, bail out and return a descriptive
# error
if check_libcloud and LooseVersion(libcloud.__version__) < '0.17.0':
module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
'Upgrade to libcloud>=0.17.0.')
return True
except IOError as e:
module.fail_json(msg='GCP Credentials File %s not found.' %
credentials_file, changed=False)
return False
except ValueError as e:
module.fail_json(
msg='Non-JSON credentials file provided. Please generate a new JSON key from the Google Cloud console',
changed=False)
def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
"""Return a Google libcloud driver connection."""
if not HAS_LIBCLOUD_BASE:
module.fail_json(msg='libcloud must be installed to use this module')
creds = _get_gcp_credentials(module,
require_valid_json=False,
check_libcloud=True)
try:
gcp = get_driver(provider)(creds['service_account_email'], creds['credentials_file'],
datacenter=module.params.get('zone', None),
project=creds['project_id'])
gcp.connection.user_agent_append("%s/%s" % (
user_agent_product, user_agent_version))
except (RuntimeError, ValueError) as e:
module.fail_json(msg=str(e), changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
return gcp
def get_google_cloud_credentials(module, scopes=None):
"""
Get credentials object for use with Google Cloud client.
Attempts to obtain credentials by calling _get_gcp_credentials. If those are
not present will attempt to connect via Application Default Credentials.
To connect via libcloud, don't use this function, use gcp_connect instead. For
Google Python API Client, see get_google_api_auth for how to connect.
For more information on Google's client library options for Python, see:
U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
Google Cloud example:
creds, params = get_google_cloud_credentials(module, scopes, user_agent_product, user_agent_version)
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds)
pubsub_client.user_agent = 'ansible-pubsub-0.1'
...
:param module: initialized Ansible module object
:type module: `class AnsibleModule`
:param scopes: list of scopes
:type scopes: ``list`` of URIs
:returns: A tuple containing (google authorized) credentials object and
params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
:rtype: ``tuple``
"""
scopes = [] if scopes is None else scopes
if not HAS_GOOGLE_AUTH:
module.fail_json(msg='Please install google-auth.')
conn_params = _get_gcp_credentials(module,
require_valid_json=True,
check_libcloud=False)
try:
if conn_params['credentials_file']:
credentials = service_account.Credentials.from_service_account_file(
conn_params['credentials_file'])
if scopes:
credentials = credentials.with_scopes(scopes)
else:
(credentials, project_id) = google.auth.default(
scopes=scopes)
if project_id is not None:
conn_params['project_id'] = project_id
return (credentials, conn_params)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
return (None, None)
def get_google_api_auth(module, scopes=None, user_agent_product='ansible-python-api', user_agent_version='NA'):
"""
Authentication for use with google-python-api-client.
Function calls get_google_cloud_credentials, which attempts to assemble the credentials
from various locations. Next it attempts to authenticate with Google.
This function returns an httplib2 (compatible) object that can be provided to the Google Python API client.
For libcloud, don't use this function, use gcp_connect instead. For Google Cloud, See
get_google_cloud_credentials for how to connect.
For more information on Google's client library options for Python, see:
U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
Google API example:
http_auth, conn_params = get_google_api_auth(module, scopes, user_agent_product, user_agent_version)
service = build('myservice', 'v1', http=http_auth)
...
:param module: initialized Ansible module object
:type module: `class AnsibleModule`
:param scopes: list of scopes
:type scopes: ``list`` of URIs
:param user_agent_product: User agent product. eg: 'ansible-python-api'
:type user_agent_product: ``str``
:param user_agent_version: Version string to append to product. eg: 'NA' or '0.1'
:type user_agent_version: ``str``
:returns: A tuple containing (google authorized) httplib2 request object and a
params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
:rtype: ``tuple``
"""
scopes = [] if scopes is None else scopes
if not HAS_GOOGLE_API_LIB:
module.fail_json(msg="Please install google-api-python-client library")
if not scopes:
scopes = GCP_DEFAULT_SCOPES
try:
(credentials, conn_params) = get_google_cloud_credentials(module, scopes)
http = set_user_agent(Http(), '%s-%s' %
(user_agent_product, user_agent_version))
http_auth = google_auth_httplib2.AuthorizedHttp(credentials, http=http)
return (http_auth, conn_params)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
return (None, None)
def get_google_api_client(module, service, user_agent_product, user_agent_version,
scopes=None, api_version='v1'):
"""
Get the discovery-based python client. Use when a cloud client is not available.
client = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
user_agent_version=USER_AGENT_VERSION)
:returns: A tuple containing the authorized client to the specified service and a
params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
:rtype: ``tuple``
"""
if not scopes:
scopes = GCP_DEFAULT_SCOPES
http_auth, conn_params = get_google_api_auth(module, scopes=scopes,
user_agent_product=user_agent_product,
user_agent_version=user_agent_version)
client = build(service, api_version, http=http_auth)
return (client, conn_params)
def check_min_pkg_version(pkg_name, minimum_version):
"""Minimum required version is >= installed version."""
from pkg_resources import get_distribution
try:
installed_version = get_distribution(pkg_name).version
return LooseVersion(installed_version) >= minimum_version
except Exception as e:
return False
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return 'Unexpected response: (%s). Detail: %s' % (str(error), traceback.format_exc())
def get_valid_location(module, driver, location, location_type='zone'):
if location_type == 'zone':
l = driver.ex_get_zone(location)
else:
l = driver.ex_get_region(location)
if l is None:
link = 'https://cloud.google.com/compute/docs/regions-zones/regions-zones#available'
module.fail_json(msg=('%s %s is invalid. Please see the list of '
'available %s at %s' % (
location_type, location, location_type, link)),
changed=False)
return l
def check_params(params, field_list):
"""
Helper to validate params.
Use this in function definitions if they require specific fields
to be present.
:param params: structure that contains the fields
:type params: ``dict``
:param field_list: list of dict representing the fields
[{'name': str, 'required': True/False', 'type': cls}]
:type field_list: ``list`` of ``dict``
:return True or raises ValueError
:rtype: ``bool`` or `class:ValueError`
"""
for d in field_list:
if not d['name'] in params:
if 'required' in d and d['required'] is True:
raise ValueError(("%s is required and must be of type: %s" %
(d['name'], str(d['type']))))
else:
if not isinstance(params[d['name']], d['type']):
raise ValueError(("%s must be of type: %s. %s (%s) provided." % (
d['name'], str(d['type']), params[d['name']],
type(params[d['name']]))))
if 'values' in d:
if params[d['name']] not in d['values']:
raise ValueError(("%s must be one of: %s" % (
d['name'], ','.join(d['values']))))
if isinstance(params[d['name']], int):
if 'min' in d:
if params[d['name']] < d['min']:
raise ValueError(("%s must be greater than or equal to: %s" % (
d['name'], d['min'])))
if 'max' in d:
if params[d['name']] > d['max']:
raise ValueError("%s must be less than or equal to: %s" % (
d['name'], d['max']))
return True
class GCPUtils(object):
"""
Helper utilities for GCP.
"""
@staticmethod
def underscore_to_camel(txt):
return txt.split('_')[0] + ''.join(x.capitalize() or '_' for x in txt.split('_')[1:])
@staticmethod
def remove_non_gcp_params(params):
"""
Remove params if found.
"""
params_to_remove = ['state']
for p in params_to_remove:
if p in params:
del params[p]
return params
@staticmethod
def params_to_gcp_dict(params, resource_name=None):
"""
Recursively convert ansible params to GCP Params.
Keys are converted from snake to camelCase
ex: default_service to defaultService
Handles lists, dicts and strings
special provision for the resource name
"""
if not isinstance(params, dict):
return params
gcp_dict = {}
params = GCPUtils.remove_non_gcp_params(params)
for k, v in params.items():
gcp_key = GCPUtils.underscore_to_camel(k)
if isinstance(v, dict):
retval = GCPUtils.params_to_gcp_dict(v)
gcp_dict[gcp_key] = retval
elif isinstance(v, list):
gcp_dict[gcp_key] = [GCPUtils.params_to_gcp_dict(x) for x in v]
else:
if resource_name and k == resource_name:
gcp_dict['name'] = v
else:
gcp_dict[gcp_key] = v
return gcp_dict
@staticmethod
def execute_api_client_req(req, client=None, raw=True,
operation_timeout=180, poll_interval=5,
raise_404=True):
"""
General python api client interaction function.
For use with google-api-python-client, or clients created
with get_google_api_client function
Not for use with Google Cloud client libraries
For long-running operations, we make an immediate query and then
sleep poll_interval before re-querying. After the request is done
we rebuild the request with a get method and return the result.
"""
try:
resp = req.execute()
if not resp:
return None
if raw:
return resp
if resp['kind'] == 'compute#operation':
resp = GCPUtils.execute_api_client_operation_req(req, resp,
client,
operation_timeout,
poll_interval)
if 'items' in resp:
return resp['items']
return resp
except HttpError as h:
# Note: 404s can be generated (incorrectly) for dependent
# resources not existing. We let the caller determine if
# they want 404s raised for their invocation.
if h.resp.status == 404 and not raise_404:
return None
else:
raise
except Exception:
raise
@staticmethod
def execute_api_client_operation_req(orig_req, op_resp, client,
operation_timeout=180, poll_interval=5):
"""
Poll an operation for a result.
"""
parsed_url = GCPUtils.parse_gcp_url(orig_req.uri)
project_id = parsed_url['project']
resource_name = GCPUtils.get_gcp_resource_from_methodId(
orig_req.methodId)
resource = GCPUtils.build_resource_from_name(client, resource_name)
start_time = time.time()
complete = False
attempts = 1
while not complete:
if start_time + operation_timeout >= time.time():
op_req = client.globalOperations().get(
project=project_id, operation=op_resp['name'])
op_resp = op_req.execute()
if op_resp['status'] != 'DONE':
time.sleep(poll_interval)
attempts += 1
else:
complete = True
if op_resp['operationType'] == 'delete':
# don't wait for the delete
return True
elif op_resp['operationType'] in ['insert', 'update', 'patch']:
# TODO(supertom): Isolate 'build-new-request' stuff.
resource_name_singular = GCPUtils.get_entity_name_from_resource_name(
resource_name)
if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url:
parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[
'entity_name']
args = {'project': project_id,
resource_name_singular: parsed_url['entity_name']}
new_req = resource.get(**args)
resp = new_req.execute()
return resp
else:
# assuming multiple entities, do a list call.
new_req = resource.list(project=project_id)
resp = new_req.execute()
return resp
else:
# operation didn't complete on time.
raise GCPOperationTimeoutError("Operation timed out: %s" % (
op_resp['targetLink']))
@staticmethod
def build_resource_from_name(client, resource_name):
try:
method = getattr(client, resource_name)
return method()
except AttributeError:
raise NotImplementedError('%s is not an attribute of %s' % (resource_name,
client))
@staticmethod
def get_gcp_resource_from_methodId(methodId):
try:
parts = methodId.split('.')
if len(parts) != 3:
return None
else:
return parts[1]
except AttributeError:
return None
@staticmethod
def get_entity_name_from_resource_name(resource_name):
if not resource_name:
return None
try:
# Chop off global or region prefixes
if resource_name.startswith('global'):
resource_name = resource_name.replace('global', '')
elif resource_name.startswith('regional'):
resource_name = resource_name.replace('region', '')
# ensure we have a lower case first letter
resource_name = resource_name[0].lower() + resource_name[1:]
if resource_name[-3:] == 'ies':
return resource_name.replace(
resource_name[-3:], 'y')
if resource_name[-1] == 's':
return resource_name[:-1]
return resource_name
except AttributeError:
return None
@staticmethod
def parse_gcp_url(url):
"""
Parse GCP urls and return dict of parts.
Supported URL structures:
/SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
/SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
/SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
/SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
/SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
/SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
/SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
/SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
/SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME
:param url: GCP-generated URL, such as a selflink or resource location.
:type url: ``str``
:return: dictionary of parts. Includes standard components of urlparse, plus
GCP-specific 'service', 'api_version', 'project' and
'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
and 'method_name', if applicable.
:rtype: ``dict``
"""
p = urlparse.urlparse(url)
if not p:
return None
else:
# we add extra items such as
# zone, region and resource_name
url_parts = {}
url_parts['scheme'] = p.scheme
url_parts['host'] = p.netloc
url_parts['path'] = p.path
if p.path.find('/') == 0:
url_parts['path'] = p.path[1:]
url_parts['params'] = p.params
url_parts['fragment'] = p.fragment
url_parts['query'] = p.query
url_parts['project'] = None
url_parts['service'] = None
url_parts['api_version'] = None
path_parts = url_parts['path'].split('/')
url_parts['service'] = path_parts[0]
url_parts['api_version'] = path_parts[1]
if path_parts[2] == 'projects':
url_parts['project'] = path_parts[3]
else:
# invalid URL
raise GCPInvalidURLError('unable to parse: %s' % url)
if 'global' in path_parts:
url_parts['global'] = True
idx = path_parts.index('global')
if len(path_parts) - idx == 4:
# we have a resource, entity and method_name
url_parts['resource_name'] = path_parts[idx + 1]
url_parts['entity_name'] = path_parts[idx + 2]
url_parts['method_name'] = path_parts[idx + 3]
if len(path_parts) - idx == 3:
# we have a resource and entity
url_parts['resource_name'] = path_parts[idx + 1]
url_parts['entity_name'] = path_parts[idx + 2]
if len(path_parts) - idx == 2:
url_parts['resource_name'] = path_parts[idx + 1]
if len(path_parts) - idx < 2:
# invalid URL
raise GCPInvalidURLError('unable to parse: %s' % url)
elif 'regions' in path_parts or 'zones' in path_parts:
idx = -1
if 'regions' in path_parts:
idx = path_parts.index('regions')
url_parts['region'] = path_parts[idx + 1]
else:
idx = path_parts.index('zones')
url_parts['zone'] = path_parts[idx + 1]
if len(path_parts) - idx == 5:
# we have a resource, entity and method_name
url_parts['resource_name'] = path_parts[idx + 2]
url_parts['entity_name'] = path_parts[idx + 3]
url_parts['method_name'] = path_parts[idx + 4]
if len(path_parts) - idx == 4:
# we have a resource and entity
url_parts['resource_name'] = path_parts[idx + 2]
url_parts['entity_name'] = path_parts[idx + 3]
if len(path_parts) - idx == 3:
url_parts['resource_name'] = path_parts[idx + 2]
if len(path_parts) - idx < 3:
# invalid URL
raise GCPInvalidURLError('unable to parse: %s' % url)
else:
# no location in URL.
idx = path_parts.index('projects')
if len(path_parts) - idx == 5:
# we have a resource, entity and method_name
url_parts['resource_name'] = path_parts[idx + 2]
url_parts['entity_name'] = path_parts[idx + 3]
url_parts['method_name'] = path_parts[idx + 4]
if len(path_parts) - idx == 4:
# we have a resource and entity
url_parts['resource_name'] = path_parts[idx + 2]
url_parts['entity_name'] = path_parts[idx + 3]
if len(path_parts) - idx == 3:
url_parts['resource_name'] = path_parts[idx + 2]
if len(path_parts) - idx < 3:
# invalid URL
raise GCPInvalidURLError('unable to parse: %s' % url)
return url_parts
@staticmethod
def build_googleapi_url(project, api_version='v1', service='compute'):
return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project)
@staticmethod
def filter_gcp_fields(params, excluded_fields=None):
new_params = {}
if not excluded_fields:
excluded_fields = ['creationTimestamp', 'id', 'kind',
'selfLink', 'fingerprint', 'description']
if isinstance(params, list):
new_params = [GCPUtils.filter_gcp_fields(
x, excluded_fields) for x in params]
elif isinstance(params, dict):
for k in params.keys():
if k not in excluded_fields:
new_params[k] = GCPUtils.filter_gcp_fields(
params[k], excluded_fields)
else:
new_params = params
return new_params
@staticmethod
def are_params_equal(p1, p2):
"""
Check if two params dicts are equal.
TODO(supertom): need a way to filter out URLs, or they need to be built
"""
filtered_p1 = GCPUtils.filter_gcp_fields(p1)
filtered_p2 = GCPUtils.filter_gcp_fields(p2)
if filtered_p1 != filtered_p2:
return False
return True
class GCPError(Exception):
pass
class GCPOperationTimeoutError(GCPError):
pass
class GCPInvalidURLError(GCPError):
pass
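Since the key GCPUtils helpers are pure functions, a short worked example (all values hypothetical) shows the conversions they perform:
# Illustrative only: exercising the snake_case -> camelCase conversion and
# the URL parser directly.
params = {'default_service': 'my-backend', 'name': 'my-urlmap', 'state': 'present'}
gcp_dict = GCPUtils.params_to_gcp_dict(params, resource_name='name')
# -> {'defaultService': 'my-backend', 'name': 'my-urlmap'}  ('state' stripped)

url = 'https://www.googleapis.com/compute/v1/projects/my-project/global/urlMaps/my-urlmap'
parts = GCPUtils.parse_gcp_url(url)
# parts['service'] == 'compute', parts['project'] == 'my-project',
# parts['resource_name'] == 'urlMaps', parts['entity_name'] == 'my-urlmap'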

@ -1,104 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import json
from distutils.version import StrictVersion
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
try:
from urllib import quote_plus # Python 2.X
except ImportError:
from urllib.parse import quote_plus # Python 3+
import traceback
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'):
url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path)
headers = {}
if access_token:
headers['Authorization'] = "Bearer %s" % access_token
else:
headers['Private-Token'] = private_token
headers['Accept'] = "application/json"
headers['Content-Type'] = "application/json"
response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method)
status = info['status']
content = ""
if response:
content = response.read()
if status == 204:
return True, content
elif status == 200 or status == 201:
return True, json.loads(content)
else:
# content may be bytes on Python 3; convert before concatenating
return False, str(status) + ": " + to_native(content)
def findProject(gitlab_instance, identifier):
try:
project = gitlab_instance.projects.get(identifier)
except Exception as e:
current_user = gitlab_instance.user
try:
project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
except Exception as e:
return None
return project
def findGroup(gitlab_instance, identifier):
try:
project = gitlab_instance.groups.get(identifier)
except Exception as e:
return None
return project
def gitlabAuthentication(module):
gitlab_url = module.params['api_url']
validate_certs = module.params['validate_certs']
gitlab_user = module.params['api_username']
gitlab_password = module.params['api_password']
gitlab_token = module.params['api_token']
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
try:
# The python-gitlab library removed support for username/password authentication in 1.13.0
# Changelog: https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
# This condition allows us to still support older versions of the python-gitlab library
if StrictVersion(gitlab.__version__) < StrictVersion("1.13.0"):
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
private_token=gitlab_token, api_version=4)
else:
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4)
gitlab_instance.auth()
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
except (gitlab.exceptions.GitlabHttpError) as e:
module.fail_json(msg="Failed to connect to GitLab server: %s. \
GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))
return gitlab_instance
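A hedged sketch of the typical flow in a gitlab_* module built on these helpers (the project path shown is hypothetical):
# Illustrative only.
gitlab_instance = gitlabAuthentication(module)  # module: an initialized AnsibleModule
project = findProject(gitlab_instance, 'my-group/my-project')
if project is None:
    module.fail_json(msg="Project 'my-group/my-project' not found")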

@ -1,41 +0,0 @@
# Copyright: (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
from ansible.module_utils.basic import env_fallback, missing_required_lib
HAS_HEROKU = False
HEROKU_IMP_ERR = None
try:
import heroku3
HAS_HEROKU = True
except ImportError:
HEROKU_IMP_ERR = traceback.format_exc()
class HerokuHelper(object):
def __init__(self, module):
self.module = module
self.check_lib()
self.api_key = module.params["api_key"]
def check_lib(self):
if not HAS_HEROKU:
self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR)
@staticmethod
def heroku_argument_spec():
return dict(
api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True))
def get_heroku_client(self):
client = heroku3.from_key(self.api_key)
if not client.is_authenticated:
self.module.fail_json(msg='Heroku authentication failure, please check your API Key')
return client
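A minimal sketch of intended use, assuming an AnsibleModule built from heroku_argument_spec() and the heroku3 client's apps() listing call:
# Illustrative only.
from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(argument_spec=HerokuHelper.heroku_argument_spec())
client = HerokuHelper(module).get_heroku_client()
apps = client.apps()  # list the account's apps via the heroku3 client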
