Pure modules: rename _facts with ansible_facts result to _info (#60641)

pull/60699/head
Simon Dodsley 5 years ago committed by Felix Fontein
parent c4d841f848
commit 491a47c7c5

@ -0,0 +1,3 @@
minor_changes:
- The ``purefa_facts`` module has been deprecated. Use ``purefa_info`` instead.
- The ``purefb_facts`` module has been deprecated. Use ``purefb_info`` instead.
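For playbooks that still use the deprecated module, a minimal migration sketch (the URL, API token and the ``array_info`` variable name below are the placeholder values used in the module's own examples): ``purefa_facts`` returned its results through ``ansible_facts``, while ``purefa_info`` returns them as an ordinary module result that has to be registered.

- name: deprecated style - results were injected via ansible_facts
  purefa_facts:
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592

- name: replacement - register the result and read purefa_info from it
  purefa_info:
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
  register: array_info

- name: use the collected information
  debug:
    msg: "{{ array_info['purefa_info']['default'] }}"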

@ -78,16 +78,20 @@ The following modules will be removed in Ansible 2.13. Please update your
* lambda_facts use :ref:`lambda_info <lambda_info_module>` instead.
* nxos_interface use :ref:`nxos_interfaces <nxos_interfaces_module>` instead.
* nxos_linkagg use :ref:`nxos_lag_interfaces <nxos_lag_interfaces_module>` instead.
* nxos_vlan use :ref:`nxos_vlans <nxos_vlans_module>` instead.
* purefa_facts use :ref:`purefa_info <purefa_info_module>` instead.
* purefb_facts use :ref:`purefb_info <purefb_info_module>` instead.
* vyos_interface use :ref:`vyos_interfaces <vyos_interfaces_module>` instead.
* vyos_l3_interface use :ref:`vyos_l3_interfaces <vyos_l3_interfaces_module>` instead.
* vyos_linkagg use :ref:`vyos_lag_interfaces <vyos_lag_interfaces_module>` instead.
The following functionality will be removed in Ansible 2.12. Please update your playbooks accordingly.

@ -8,13 +8,17 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_facts
version_added: '2.6'
deprecated:
removed_in: '2.13'
why: Deprecated in favor of C(_info) module.
alternative: Use M(purefa_info) instead.
short_description: Collect facts from Pure Storage FlashArray
description:
- Collect facts from a Pure Storage FlashArray running the

@ -9,13 +9,17 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefb_facts
version_added: '2.7'
deprecated:
removed_in: '2.13'
why: Deprecated in favor of C(_info) module.
alternative: Use M(purefb_info) instead.
short_description: Collect facts from Pure Storage FlashBlade
description:
- Collect facts from a Pure Storage FlashBlade running the

@ -0,0 +1,959 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_info
version_added: '2.9'
short_description: Collect information from Pure Storage FlashArray
description:
- Collect information from a Pure Storage FlashArray running the
Purity//FA operating system. By default, the module will collect basic
information including host, host group, protection group and volume
group counts. Additional information can be collected
based on the configured set of arguments.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
gather_subset:
description:
- When supplied, this argument will define the information to be collected.
Possible values for this include all, minimum, config, performance,
capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
admins, volumes, snapshots, pods, vgroups, offload, apps and arrays.
type: list
required: false
default: minimum
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: collect default set of information
purefa_info:
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
register: array_info
- name: show default information
debug:
msg: "{{ array_info['purefa_info']['default'] }}"
- name: collect configuration information
purefa_info:
gather_subset:
- config
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
register: array_info
- name: show configuration information
debug:
msg: "{{ array_info['purefa_info']['config'] }}"
- name: collect all information
purefa_info:
gather_subset:
- all
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
register: array_info
- name: show all information
debug:
msg: "{{ array_info['purefa_info'] }}"
'''
RETURN = r'''
purefa_info:
description: Returns the information collected from the FlashArray
returned: always
type: complex
contains:
"admins": {
"pureuser": {
"role": "array_admin",
"type": "local"
}
}
"apps": {
"offload": {
"description": "Snapshot offload to NFS or Amazon S3",
"status": "healthy",
"version": "5.2.1"
}
}
"arrays": {}
"capacity": {
"data_reduction": 11.664774599686346,
"free_space": 6995782867042,
"provisioned_space": 442391871488,
"shared_space": 3070918120,
"snapshot_space": 284597118,
"system_space": 0,
"thin_provisioning": 0.8201773449669771,
"total_capacity": 7002920315199,
"total_reduction": 64.86821472825108,
"volume_space": 3781932919
}
"config": {
"directory_service": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"check_peer": false,
"enabled": false,
"uri": [],
"user_login_attribute": null,
"user_object_class": null
},
"directory_service_roles": {
"array_admin": {
"group": null,
"group_base": null
},
"ops_admin": {
"group": null,
"group_base": null
},
"readonly": {
"group": null,
"group_base": null
},
"storage_admin": {
"group": null,
"group_base": null
}
},
"dns": {
"domain": "acme.com",
"nameservers": [
"8.8.8.8",
"8.8.4.4"
]
},
"global_admin": {
"lockout_duration": null,
"max_login_attempts": null,
"min_password_length": 1,
"single_sign_on_enabled": false
},
"idle_timeout": 0,
"ntp": [
"prod-ntp1.puretec.purestorage.com"
],
"phonehome": "enabled",
"proxy": "",
"relayhost": "smtp.puretec.purestorage.com",
"scsi_timeout": 60,
"senderdomain": "purestorage.com",
"smtp": [
{
"enabled": true,
"name": "flasharray-alerts@purestorage.com"
}
],
"snmp": [
{
"auth_passphrase": null,
"auth_protocol": null,
"community": null,
"host": "localhost",
"name": "localhost",
"notification": null,
"privacy_passphrase": null,
"privacy_protocol": null,
"user": null,
"version": "v2c"
},
{
"auth_passphrase": null,
"auth_protocol": null,
"community": "****",
"host": "10.21.23.34",
"name": "manager1",
"notification": "trap",
"privacy_passphrase": null,
"privacy_protocol": null,
"user": null,
"version": "v2c"
}
],
"ssl_certs": {
"common_name": null,
"country": null,
"email": null,
"issued_by": "",
"issued_to": "",
"key_size": 2048,
"locality": null,
"name": "management",
"organization": "Pure Storage, Inc.",
"organizational_unit": "Pure Storage, Inc.",
"state": null,
"status": "self-signed",
"valid_from": 1502492946000,
"valid_to": 1817852946000
},
"syslog": [
"udp://prod-ntp1.puretec.purestorage.com:333",
"udp://prod-ntp2.puretec.purestorage.com:333"
]
}
"default": {
"admins": 1,
"array_model": "FA-405",
"array_name": "array",
"connected_arrays": 0,
"connection_key": "c6033033-fe69-2515-a9e8-966bb7fe4b40",
"hostgroups": 0,
"hosts": 15,
"pods": 1,
"protection_groups": 1,
"purity_version": "5.2.1",
"snapshots": 2,
"volume_groups": 1
}
"hgroups": {}
"hosts": {
"@offload": {
"hgroup": null,
"iqn": [],
"nqn": [],
"personality": null,
"preferred_array": [],
"target_port": [],
"wwn": []
},
"docker-host": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:d97adf78472"
],
"nqn": [],
"personality": null,
"preferred_array": [],
"target_port": [
"CT1.ETH5",
"CT0.ETH5",
"CT0.ETH4",
"CT1.ETH4"
],
"wwn": []
}
}
"interfaces": {
"CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682"
}
"network": {
"@offload.data0": {
"address": "10.21.200.222",
"gateway": "10.21.200.1",
"hwaddr": "52:54:30:02:b9:4e",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"app"
],
"speed": 10000000000
},
"ct0.eth0": {
"address": "10.21.200.211",
"gateway": "10.21.200.1",
"hwaddr": "ec:f4:bb:c8:8a:04",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
},
"ct0.eth2": {
"address": "10.21.200.218",
"gateway": null,
"hwaddr": "ec:f4:bb:c8:8a:00",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"replication"
],
"speed": 10000000000
},
"ct0.eth4": {
"address": "10.21.200.214",
"gateway": null,
"hwaddr": "90:e2:ba:83:79:0c",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"iscsi"
],
"speed": 10000000000
},
"ct1.eth0": {
"address": "10.21.200.212",
"gateway": "10.21.200.1",
"hwaddr": "ec:f4:bb:e4:c6:3c",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
},
"ct1.eth2": {
"address": "10.21.200.220",
"gateway": null,
"hwaddr": "ec:f4:bb:e4:c6:38",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"replication"
],
"speed": 10000000000
},
"ct1.eth4": {
"address": "10.21.200.216",
"gateway": null,
"hwaddr": "90:e2:ba:8b:b1:8c",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"iscsi"
],
"speed": 10000000000
},
"vir0": {
"address": "10.21.200.210",
"gateway": "10.21.200.1",
"hwaddr": "fe:ba:e9:e7:6b:0f",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
}
}
"nfs_offload": {}
"performance": {
"input_per_sec": 0,
"local_queue_usec_per_op": 0,
"output_per_sec": 0,
"qos_rate_limit_usec_per_read_op": 0,
"qos_rate_limit_usec_per_write_op": 0,
"queue_depth": 0,
"queue_usec_per_read_op": 0,
"queue_usec_per_write_op": 0,
"reads_per_sec": 0,
"san_usec_per_read_op": 0,
"san_usec_per_write_op": 0,
"time": "2019-08-14T21:33:51Z",
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"writes_per_sec": 0
}
"pgroups": {
"simon": {
"hgroups": null,
"hosts": null,
"source": "docker-host",
"targets": null,
"volumes": null
}
}
"pods": {
"test": {
"arrays": [
{
"array_id": "043be47c-1233-4399-b9d6-8fe38727dd9d",
"mediator_status": "online",
"name": "array2",
"status": "online"
}
],
"source": null
}
}
"s3_offload": {
"s3-offload": {
"access_key_id": "AKIAILNVEPWZTV4FGWZQ",
"bucket": "offload-bucket",
"protocol": "s3",
"status": "connected"
}
}
"snapshots": {
"@offload_boot.1": {
"created": "2019-03-14T15:29:20Z",
"size": 68719476736,
"source": "@offload_boot"
}
}
"subnet": {}
"vgroups": {
"test": {
"volumes": [
"test/test",
"test/test1"
]
}
}
"volumes": {
"@offload_boot": {
"bandwidth": null,
"hosts": [
[
"@offload",
1
]
],
"serial": "43BE47C12334399B00013959",
"size": 68719476736,
"source": null
},
"docker-store": {
"bandwidth": null,
"hosts": [
[
"docker-host",
1
]
],
"serial": "43BE47C12334399B00011418",
"size": 21474836480,
"source": null
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
S3_REQUIRED_API_VERSION = '1.16'
LATENCY_REQUIRED_API_VERSION = '1.16'
AC_REQUIRED_API_VERSION = '1.14'
CAP_REQUIRED_API_VERSION = '1.6'
SAN_REQUIRED_API_VERSION = '1.10'
NVME_API_VERSION = '1.16'
PREFERRED_API_VERSION = '1.15'
CONN_STATUS_API_VERSION = '1.17'
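# Each *_REQUIRED_API_VERSION constant above is checked against the REST API
# versions reported by the target array; data that needs a newer API than the
# array offers is simply left out of the corresponding section.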
def generate_default_dict(array):
default_info = {}
defaults = array.get()
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
default_info['volume_groups'] = len(array.list_vgroups())
default_info['connected_arrays'] = len(array.list_array_connections())
default_info['pods'] = len(array.list_pods())
default_info['connection_key'] = array.get(connection_key=True)['connection_key']
hosts = array.list_hosts()
admins = array.list_admins()
snaps = array.list_volumes(snap=True, pending=True)
pgroups = array.list_pgroups(pending=True)
hgroups = array.list_hgroups()
# Old FA arrays only report model from the primary controller
ct0_model = array.get_hardware('CT0')['model']
if ct0_model:
model = ct0_model
else:
ct1_model = array.get_hardware('CT1')['model']
model = ct1_model
default_info['array_model'] = model
default_info['array_name'] = defaults['array_name']
default_info['purity_version'] = defaults['version']
default_info['hosts'] = len(hosts)
default_info['snapshots'] = len(snaps)
default_info['protection_groups'] = len(pgroups)
default_info['hostgroups'] = len(hgroups)
default_info['admins'] = len(admins)
return default_info
def generate_perf_dict(array):
perf_info = {}
api_version = array._list_available_rest_versions()
if LATENCY_REQUIRED_API_VERSION in api_version:
latency_info = array.get(action='monitor', latency=True)[0]
perf_info = array.get(action='monitor')[0]
# IOPS and bandwidth figures (writes_per_sec, reads_per_sec, input_per_sec,
# output_per_sec) are already present in the monitor output collected above.
# Latency
if LATENCY_REQUIRED_API_VERSION in api_version:
perf_info['san_usec_per_read_op'] = latency_info['san_usec_per_read_op']
perf_info['san_usec_per_write_op'] = latency_info['san_usec_per_write_op']
perf_info['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op']
perf_info['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op']
perf_info['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op']
perf_info['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op']
# local_queue_usec_per_op, usec_per_read_op, usec_per_write_op and queue_depth
# are likewise taken directly from the monitor output.
return perf_info
def generate_config_dict(array):
config_info = {}
api_version = array._list_available_rest_versions()
# DNS
config_info['dns'] = array.get_dns()
# SMTP
config_info['smtp'] = array.list_alert_recipients()
# SNMP
config_info['snmp'] = array.list_snmp_managers()
config_info['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
# DS
config_info['directory_service'] = array.get_directory_service()
if S3_REQUIRED_API_VERSION in api_version:
config_info['directory_service_roles'] = {}
roles = array.list_directory_service_roles()
for role in range(0, len(roles)):
role_name = roles[role]['name']
config_info['directory_service_roles'][role_name] = {
'group': roles[role]['group'],
'group_base': roles[role]['group_base'],
}
else:
config_info['directory_service'].update(array.get_directory_service(groups=True))
# NTP
config_info['ntp'] = array.get(ntpserver=True)['ntpserver']
# SYSLOG
config_info['syslog'] = array.get(syslogserver=True)['syslogserver']
# Phonehome
config_info['phonehome'] = array.get(phonehome=True)['phonehome']
# Proxy
config_info['proxy'] = array.get(proxy=True)['proxy']
# Relay Host
config_info['relayhost'] = array.get(relayhost=True)['relayhost']
# Sender Domain
config_info['senderdomain'] = array.get(senderdomain=True)['senderdomain']
# Idle Timeout
config_info['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
# SCSI Timeout
config_info['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
# SSL
config_info['ssl_certs'] = array.get_certificate()
# Global Admin settings
if S3_REQUIRED_API_VERSION in api_version:
config_info['global_admin'] = array.get_global_admin_attributes()
return config_info
def generate_admin_dict(array):
admin_info = {}
admins = array.list_admins()
for admin in range(0, len(admins)):
admin_name = admins[admin]['name']
admin_info[admin_name] = {
'type': admins[admin]['type'],
'role': admins[admin]['role'],
}
return admin_info
def generate_subnet_dict(array):
sub_info = {}
subnets = array.list_subnets()
for sub in range(0, len(subnets)):
sub_name = subnets[sub]['name']
if subnets[sub]['enabled']:
sub_info[sub_name] = {
'gateway': subnets[sub]['gateway'],
'mtu': subnets[sub]['mtu'],
'vlan': subnets[sub]['vlan'],
'prefix': subnets[sub]['prefix'],
'interfaces': subnets[sub]['interfaces'],
'services': subnets[sub]['services'],
}
return sub_info
def generate_network_dict(array):
net_info = {}
ports = array.list_network_interfaces()
for port in range(0, len(ports)):
int_name = ports[port]['name']
net_info[int_name] = {
'hwaddr': ports[port]['hwaddr'],
'mtu': ports[port]['mtu'],
'enabled': ports[port]['enabled'],
'speed': ports[port]['speed'],
'address': ports[port]['address'],
'slaves': ports[port]['slaves'],
'services': ports[port]['services'],
'gateway': ports[port]['gateway'],
'netmask': ports[port]['netmask'],
}
if ports[port]['subnet']:
subnets = array.get_subnet(ports[port]['subnet'])
if subnets['enabled']:
net_info[int_name]['subnet'] = {
'name': subnets['name'],
'prefix': subnets['prefix'],
'vlan': subnets['vlan'],
}
return net_info
def generate_capacity_dict(array):
capacity_info = {}
api_version = array._list_available_rest_versions()
if CAP_REQUIRED_API_VERSION in api_version:
volumes = array.list_volumes(pending=True)
capacity_info['provisioned_space'] = sum(item['size'] for item in volumes)
capacity = array.get(space=True)
total_capacity = capacity[0]['capacity']
used_space = capacity[0]["total"]
capacity_info['free_space'] = total_capacity - used_space
capacity_info['total_capacity'] = total_capacity
capacity_info['data_reduction'] = capacity[0]['data_reduction']
capacity_info['system_space'] = capacity[0]['system']
capacity_info['volume_space'] = capacity[0]['volumes']
capacity_info['shared_space'] = capacity[0]['shared_space']
capacity_info['snapshot_space'] = capacity[0]['snapshots']
capacity_info['thin_provisioning'] = capacity[0]['thin_provisioning']
capacity_info['total_reduction'] = capacity[0]['total_reduction']
return capacity_info
def generate_snap_dict(array):
snap_info = {}
snaps = array.list_volumes(snap=True)
for snap in range(0, len(snaps)):
snapshot = snaps[snap]['name']
snap_info[snapshot] = {
'size': snaps[snap]['size'],
'source': snaps[snap]['source'],
'created': snaps[snap]['created'],
}
return snap_info
def generate_vol_dict(array):
volume_info = {}
vols = array.list_volumes()
for vol in range(0, len(vols)):
volume = vols[vol]['name']
volume_info[volume] = {
'source': vols[vol]['source'],
'size': vols[vol]['size'],
'serial': vols[vol]['serial'],
'hosts': [],
'bandwidth': ""
}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
qvols = array.list_volumes(qos=True)
for qvol in range(0, len(qvols)):
volume = qvols[qvol]['name']
qos = qvols[qvol]['bandwidth_limit']
volume_info[volume]['bandwidth'] = qos
vvols = array.list_volumes(protocol_endpoint=True)
for vvol in range(0, len(vvols)):
volume = vvols[vvol]['name']
volume_info[volume] = {
'source': vvols[vvol]['source'],
'serial': vvols[vvol]['serial'],
'hosts': []
}
cvols = array.list_volumes(connect=True)
for cvol in range(0, len(cvols)):
volume = cvols[cvol]['name']
voldict = [cvols[cvol]['host'], cvols[cvol]['lun']]
volume_info[volume]['hosts'].append(voldict)
return volume_info
def generate_host_dict(array):
api_version = array._list_available_rest_versions()
host_info = {}
hosts = array.list_hosts()
for host in range(0, len(hosts)):
hostname = hosts[host]['name']
tports = []
host_all_info = array.get_host(hostname, all=True)
if host_all_info:
tports = host_all_info[0]['target_port']
host_info[hostname] = {
'hgroup': hosts[host]['hgroup'],
'iqn': hosts[host]['iqn'],
'wwn': hosts[host]['wwn'],
'personality': array.get_host(hostname,
personality=True)['personality'],
'target_port': tports
}
if NVME_API_VERSION in api_version:
host_info[hostname]['nqn'] = hosts[host]['nqn']
if PREFERRED_API_VERSION in api_version:
hosts = array.list_hosts(preferred_array=True)
for host in range(0, len(hosts)):
hostname = hosts[host]['name']
host_info[hostname]['preferred_array'] = hosts[host]['preferred_array']
return host_info
def generate_pgroups_dict(array):
pgroups_info = {}
pgroups = array.list_pgroups()
for pgroup in range(0, len(pgroups)):
protgroup = pgroups[pgroup]['name']
pgroups_info[protgroup] = {
'hgroups': pgroups[pgroup]['hgroups'],
'hosts': pgroups[pgroup]['hosts'],
'source': pgroups[pgroup]['source'],
'targets': pgroups[pgroup]['targets'],
'volumes': pgroups[pgroup]['volumes'],
}
prot_sched = array.get_pgroup(protgroup, schedule=True)
prot_reten = array.get_pgroup(protgroup, retention=True)
if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
pgroups_info[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
pgroups_info[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
pgroups_info[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
pgroups_info[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
pgroups_info[protgroup]['snap_at'] = prot_sched['snap_at']
pgroups_info[protgroup]['replicate_at'] = prot_sched['replicate_at']
pgroups_info[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
pgroups_info[protgroup]['per_day'] = prot_reten['per_day']
pgroups_info[protgroup]['target_per_day'] = prot_reten['target_per_day']
pgroups_info[protgroup]['target_days'] = prot_reten['target_days']
pgroups_info[protgroup]['days'] = prot_reten['days']
pgroups_info[protgroup]['all_for'] = prot_reten['all_for']
pgroups_info[protgroup]['target_all_for'] = prot_reten['target_all_for']
if ":" in protgroup:
snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
pgroups_info[protgroup]['snaps'] = {}
for snap_transfer in range(0, len(snap_transfers)):
snap = snap_transfers[snap_transfer]['name']
pgroups_info[protgroup]['snaps'][snap] = {
'created': snap_transfers[snap_transfer]['created'],
'started': snap_transfers[snap_transfer]['started'],
'completed': snap_transfers[snap_transfer]['completed'],
'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'],
'data_transferred': snap_transfers[snap_transfer]['data_transferred'],
'progress': snap_transfers[snap_transfer]['progress'],
}
return pgroups_info
def generate_pods_dict(array):
pods_info = {}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
pods = array.list_pods()
for pod in range(0, len(pods)):
acpod = pods[pod]['name']
pods_info[acpod] = {
'source': pods[pod]['source'],
'arrays': pods[pod]['arrays'],
}
return pods_info
def generate_conn_array_dict(array):
conn_array_info = {}
api_version = array._list_available_rest_versions()
if CONN_STATUS_API_VERSION in api_version:
carrays = array.list_connected_arrays()
for carray in range(0, len(carrays)):
arrayname = carrays[carray]['array_name']
conn_array_info[arrayname] = {
'array_id': carrays[carray]['id'],
'throtled': carrays[carray]['throtled'],
'version': carrays[carray]['version'],
'type': carrays[carray]['type'],
'mgmt_ip': carrays[carray]['management_address'],
'repl_ip': carrays[carray]['replication_address'],
}
if CONN_STATUS_API_VERSION in api_version:
conn_array_info[arrayname]['status'] = carrays[carray]['status']
return conn_array_info
def generate_apps_dict(array):
apps_info = {}
api_version = array._list_available_rest_versions()
if SAN_REQUIRED_API_VERSION in api_version:
apps = array.list_apps()
for app in range(0, len(apps)):
appname = apps[app]['name']
apps_info[appname] = {
'version': apps[app]['version'],
'status': apps[app]['status'],
'description': apps[app]['description'],
}
return apps_info
def generate_vgroups_dict(array):
vgroups_info = {}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
vgroups = array.list_vgroups()
for vgroup in range(0, len(vgroups)):
virtgroup = vgroups[vgroup]['name']
vgroups_info[virtgroup] = {
'volumes': vgroups[vgroup]['volumes'],
}
return vgroups_info
def generate_nfs_offload_dict(array):
offload_info = {}
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version:
offload = array.list_nfs_offload()
for target in range(0, len(offload)):
offloadt = offload[target]['name']
offload_info[offloadt] = {
'status': offload[target]['status'],
'mount_point': offload[target]['mount_point'],
'protocol': offload[target]['protocol'],
'mount_options': offload[target]['mount_options'],
'address': offload[target]['address'],
}
return offload_info
def generate_s3_offload_dict(array):
offload_info = {}
api_version = array._list_available_rest_versions()
if S3_REQUIRED_API_VERSION in api_version:
offload = array.list_s3_offload()
for target in range(0, len(offload)):
offloadt = offload[target]['name']
offload_info[offloadt] = {
'status': offload[target]['status'],
'bucket': offload[target]['bucket'],
'protocol': offload[target]['protocol'],
'access_key_id': offload[target]['access_key_id'],
}
return offload_info
def generate_hgroups_dict(array):
hgroups_info = {}
hgroups = array.list_hgroups()
for hgroup in range(0, len(hgroups)):
hostgroup = hgroups[hgroup]['name']
hgroups_info[hostgroup] = {
'hosts': hgroups[hgroup]['hosts'],
'pgs': [],
'vols': [],
}
pghgroups = array.list_hgroups(protect=True)
for pghg in range(0, len(pghgroups)):
pgname = pghgroups[pghg]['name']
hgroups_info[pgname]['pgs'].append(pghgroups[pghg]['protection_group'])
volhgroups = array.list_hgroups(connect=True)
for pgvol in range(0, len(volhgroups)):
pgname = volhgroups[pgvol]['name']
volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']]
hgroups_info[pgname]['vols'].append(volpgdict)
return hgroups_info
def generate_interfaces_dict(array):
api_version = array._list_available_rest_versions()
int_info = {}
ports = array.list_ports()
for port in range(0, len(ports)):
int_name = ports[port]['name']
if ports[port]['wwn']:
int_info[int_name] = ports[port]['wwn']
if ports[port]['iqn']:
int_info[int_name] = ports[port]['iqn']
if NVME_API_VERSION in api_version:
if ports[port]['nqn']:
int_info[int_name] = ports[port]['nqn']
return int_info
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
gather_subset=dict(default='minimum', type='list',)
))
module = AnsibleModule(argument_spec, supports_check_mode=False)
array = get_system(module)
subset = [test.lower() for test in module.params['gather_subset']]
valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
'hosts', 'admins', 'volumes', 'snapshots', 'pods',
'vgroups', 'offload', 'apps', 'arrays')
subset_test = (test in valid_subsets for test in subset)
if not all(subset_test):
module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
% (",".join(valid_subsets), ",".join(subset)))
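# Only the generators for the requested subsets are run below; 'all' runs
# every generator and 'minimum' produces just the 'default' summary.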
info = {}
if 'minimum' in subset or 'all' in subset:
info['default'] = generate_default_dict(array)
if 'performance' in subset or 'all' in subset:
info['performance'] = generate_perf_dict(array)
if 'config' in subset or 'all' in subset:
info['config'] = generate_config_dict(array)
if 'capacity' in subset or 'all' in subset:
info['capacity'] = generate_capacity_dict(array)
if 'network' in subset or 'all' in subset:
info['network'] = generate_network_dict(array)
if 'subnet' in subset or 'all' in subset:
info['subnet'] = generate_subnet_dict(array)
if 'interfaces' in subset or 'all' in subset:
info['interfaces'] = generate_interfaces_dict(array)
if 'hosts' in subset or 'all' in subset:
info['hosts'] = generate_host_dict(array)
if 'volumes' in subset or 'all' in subset:
info['volumes'] = generate_vol_dict(array)
if 'snapshots' in subset or 'all' in subset:
info['snapshots'] = generate_snap_dict(array)
if 'hgroups' in subset or 'all' in subset:
info['hgroups'] = generate_hgroups_dict(array)
if 'pgroups' in subset or 'all' in subset:
info['pgroups'] = generate_pgroups_dict(array)
if 'pods' in subset or 'all' in subset:
info['pods'] = generate_pods_dict(array)
if 'admins' in subset or 'all' in subset:
info['admins'] = generate_admin_dict(array)
if 'vgroups' in subset or 'all' in subset:
info['vgroups'] = generate_vgroups_dict(array)
if 'offload' in subset or 'all' in subset:
info['nfs_offload'] = generate_nfs_offload_dict(array)
info['s3_offload'] = generate_s3_offload_dict(array)
if 'apps' in subset or 'all' in subset:
info['apps'] = generate_apps_dict(array)
if 'arrays' in subset or 'all' in subset:
info['arrays'] = generate_conn_array_dict(array)
module.exit_json(changed=False, purefa_info=info)
if __name__ == '__main__':
main()
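A short usage sketch (connection details as in the EXAMPLES above): several specific subsets can be requested in a single task, and only the corresponding sections appear in the returned purefa_info dictionary.

- name: collect only capacity and network information
  purefa_info:
    gather_subset:
      - capacity
      - network
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
  register: array_info

- name: show the capacity figures
  debug:
    msg: "{{ array_info['purefa_info']['capacity'] }}"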

@ -0,0 +1,662 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefb_info
version_added: '2.9'
short_description: Collect information from Pure Storage FlashBlade
description:
- Collect information from a Pure Storage FlashBlade running the
Purity//FB operating system. By default, the module will collect basic
information including filesystem, snapshot, bucket, blade and
object store account counts. Additional information can be collected
based on the configured set of arguments.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
gather_subset:
description:
- When supplied, this argument will define the information to be collected.
Possible values for this include all, minimum, config, performance,
capacity, network, subnets, lags, filesystems and snapshots.
required: false
type: list
default: minimum
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = r'''
- name: collect default set of info
purefb_info:
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
register: blade_info
- name: show default information
debug:
msg: "{{ blade_info['purefb_info']['default'] }}"
- name: collect configuration info
purefb_info:
gather_subset:
- config
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
register: blade_info
- name: show config information
debug:
msg: "{{ blade_info['purefb_info']['config'] }}"
- name: collect all info
purefb_info:
gather_subset:
- all
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
register: blade_info
- name: show all information
debug:
msg: "{{ blade_info['purefb_info'] }}"
'''
RETURN = r'''
purefb_info:
description: Returns the information collected from the FlashBlade
returned: always
type: complex
contains:
"capacity": {
"aggregate": {
"data_reduction": 1.1179228,
"snapshots": 0,
"total_physical": 17519748439,
"unique": 17519748439,
"virtual": 19585726464
},
"file-system": {
"data_reduction": 1.3642412,
"snapshots": 0,
"total_physical": 4748219708,
"unique": 4748219708,
"virtual": 6477716992
},
"object-store": {
"data_reduction": 1.0263462,
"snapshots": 0,
"total_physical": 12771528731,
"unique": 12771528731,
"virtual": 6477716992
},
"total": 83359896948925
}
"config": {
"alert_watchers": {
"enabled": true,
"name": "notify@acmestorage.com"
},
"array_management": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "management",
"services": [
"management"
],
"uris": []
},
"directory_service_roles": {
"array_admin": {
"group": null,
"group_base": null
},
"ops_admin": {
"group": null,
"group_base": null
},
"readonly": {
"group": null,
"group_base": null
},
"storage_admin": {
"group": null,
"group_base": null
}
},
"dns": {
"domain": "demo.acmestorage.com",
"name": "demo-fb-1",
"nameservers": [
"8.8.8.8"
],
"search": [
"demo.acmestorage.com"
]
},
"nfs_directory_service": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "nfs",
"services": [
"nfs"
],
"uris": []
},
"ntp": [
"0.ntp.pool.org"
],
"smb_directory_service": {
"base_dn": null,
"bind_password": null,
"bind_user": null,
"enabled": false,
"name": "smb",
"services": [
"smb"
],
"uris": []
},
"smtp": {
"name": "demo-fb-1",
"relay_host": null,
"sender_domain": "acmestorage.com"
},
"ssl_certs": {
"certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
"common_name": "Acme Storage",
"country": "US",
"email": null,
"intermediate_certificate": null,
"issued_by": "Acme Storage",
"issued_to": "Acme Storage",
"key_size": 4096,
"locality": null,
"name": "global",
"organization": "Acme Storage",
"organizational_unit": "Acme Storage",
"passphrase": null,
"private_key": null,
"state": null,
"status": "self-signed",
"valid_from": "1508433967000",
"valid_to": "2458833967000"
}
}
"default": {
"blades": 15,
"buckets": 7,
"filesystems": 2,
"flashblade_name": "demo-fb-1",
"object_store_accounts": 1,
"object_store_users": 1,
"purity_version": "2.2.0",
"snapshots": 1,
"total_capacity": 83359896948925
}
"filesystems": {
"k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
"destroyed": false,
"fast_remove": false,
"hard_limit": true,
"nfs_rules": "*(rw,no_root_squash)",
"provisioned": 21474836480,
"snapshot_enabled": false
},
"z": {
"destroyed": false,
"fast_remove": false,
"hard_limit": false,
"provisioned": 1073741824,
"snapshot_enabled": false
}
}
"lag": {
"uplink": {
"lag_speed": 0,
"port_speed": 40000000000,
"ports": [
{
"name": "CH1.FM1.ETH1.1"
},
{
"name": "CH1.FM1.ETH1.2"
},
],
"status": "healthy"
}
}
"network": {
"fm1.admin0": {
"address": "10.10.100.6",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"support"
],
"type": "vip",
"vlan": 2200
},
"fm2.admin0": {
"address": "10.10.100.7",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"support"
],
"type": "vip",
"vlan": 2200
},
"nfs1": {
"address": "10.10.100.4",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"data"
],
"type": "vip",
"vlan": 2200
},
"vir0": {
"address": "10.10.100.5",
"gateway": "10.10.100.1",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"type": "vip",
"vlan": 2200
}
}
"performance": {
"aggregate": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"http": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"nfs": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
},
"s3": {
"bytes_per_op": 0,
"bytes_per_read": 0,
"bytes_per_write": 0,
"read_bytes_per_sec": 0,
"reads_per_sec": 0,
"usec_per_other_op": 0,
"usec_per_read_op": 0,
"usec_per_write_op": 0,
"write_bytes_per_sec": 0,
"writes_per_sec": 0
}
}
"snapshots": {
"z.188": {
"destroyed": false,
"source": "z",
"source_destroyed": false,
"suffix": "188"
}
}
"subnet": {
"new-mgmt": {
"gateway": "10.10.100.1",
"interfaces": [
{
"name": "fm1.admin0"
},
{
"name": "fm2.admin0"
},
{
"name": "nfs1"
},
{
"name": "vir0"
}
],
"lag": "uplink",
"mtu": 1500,
"prefix": "10.10.100.0/24",
"services": [
"data",
"management",
"support"
],
"vlan": 2200
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_blade, purefb_argument_spec
MIN_REQUIRED_API_VERSION = '1.3'
HARD_LIMIT_API_VERSION = '1.4'
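# MIN_REQUIRED_API_VERSION is enforced in main(); HARD_LIMIT_API_VERSION
# additionally gates the directory-service details and the per-filesystem
# hard_limit field.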
def generate_default_dict(blade):
default_info = {}
defaults = blade.arrays.list_arrays().items[0]
default_info['flashblade_name'] = defaults.name
default_info['purity_version'] = defaults.version
default_info['filesystems'] = \
len(blade.file_systems.list_file_systems().items)
default_info['snapshots'] = \
len(blade.file_system_snapshots.list_file_system_snapshots().items)
default_info['buckets'] = len(blade.buckets.list_buckets().items)
default_info['object_store_users'] = \
len(blade.object_store_users.list_object_store_users().items)
default_info['object_store_accounts'] = \
len(blade.object_store_accounts.list_object_store_accounts().items)
default_info['blades'] = len(blade.blade.list_blades().items)
default_info['total_capacity'] = \
blade.arrays.list_arrays_space().items[0].capacity
return default_info
def generate_perf_dict(blade):
perf_info = {}
total_perf = blade.arrays.list_arrays_performance()
http_perf = blade.arrays.list_arrays_performance(protocol='http')
s3_perf = blade.arrays.list_arrays_performance(protocol='s3')
nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs')
perf_info['aggregate'] = {
'bytes_per_op': total_perf.items[0].bytes_per_op,
'bytes_per_read': total_perf.items[0].bytes_per_read,
'bytes_per_write': total_perf.items[0].bytes_per_write,
'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec,
'reads_per_sec': total_perf.items[0].reads_per_sec,
'usec_per_other_op': total_perf.items[0].usec_per_other_op,
'usec_per_read_op': total_perf.items[0].usec_per_read_op,
'usec_per_write_op': total_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec,
'writes_per_sec': total_perf.items[0].writes_per_sec,
}
perf_info['http'] = {
'bytes_per_op': http_perf.items[0].bytes_per_op,
'bytes_per_read': http_perf.items[0].bytes_per_read,
'bytes_per_write': http_perf.items[0].bytes_per_write,
'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec,
'reads_per_sec': http_perf.items[0].reads_per_sec,
'usec_per_other_op': http_perf.items[0].usec_per_other_op,
'usec_per_read_op': http_perf.items[0].usec_per_read_op,
'usec_per_write_op': http_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec,
'writes_per_sec': http_perf.items[0].writes_per_sec,
}
perf_info['s3'] = {
'bytes_per_op': s3_perf.items[0].bytes_per_op,
'bytes_per_read': s3_perf.items[0].bytes_per_read,
'bytes_per_write': s3_perf.items[0].bytes_per_write,
'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec,
'reads_per_sec': s3_perf.items[0].reads_per_sec,
'usec_per_other_op': s3_perf.items[0].usec_per_other_op,
'usec_per_read_op': s3_perf.items[0].usec_per_read_op,
'usec_per_write_op': s3_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec,
'writes_per_sec': s3_perf.items[0].writes_per_sec,
}
perf_info['nfs'] = {
'bytes_per_op': nfs_perf.items[0].bytes_per_op,
'bytes_per_read': nfs_perf.items[0].bytes_per_read,
'bytes_per_write': nfs_perf.items[0].bytes_per_write,
'read_bytes_per_sec': nfs_perf.items[0].read_bytes_per_sec,
'reads_per_sec': nfs_perf.items[0].reads_per_sec,
'usec_per_other_op': nfs_perf.items[0].usec_per_other_op,
'usec_per_read_op': nfs_perf.items[0].usec_per_read_op,
'usec_per_write_op': nfs_perf.items[0].usec_per_write_op,
'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec,
'writes_per_sec': nfs_perf.items[0].writes_per_sec,
}
return perf_info
def generate_config_dict(blade):
config_info = {}
config_info['dns'] = blade.dns.list_dns().items[0].to_dict()
config_info['smtp'] = blade.smtp.list_smtp().items[0].to_dict()
config_info['alert_watchers'] = \
blade.alert_watchers.list_alert_watchers().items[0].to_dict()
api_version = blade.api_version.list_versions().versions
if HARD_LIMIT_API_VERSION in api_version:
config_info['array_management'] = \
blade.directory_services.list_directory_services(names=['management']).items[0].to_dict()
config_info['directory_service_roles'] = {}
roles = blade.directory_services.list_directory_services_roles()
for role in range(0, len(roles.items)):
role_name = roles.items[role].name
config_info['directory_service_roles'][role_name] = {
'group': roles.items[role].group,
'group_base': roles.items[role].group_base
}
config_info['nfs_directory_service'] = \
blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict()
config_info['smb_directory_service'] = \
blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict()
config_info['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
config_info['ssl_certs'] = \
blade.certificates.list_certificates().items[0].to_dict()
return config_info
def generate_subnet_dict(blade):
sub_info = {}
subnets = blade.subnets.list_subnets()
for sub in range(0, len(subnets.items)):
sub_name = subnets.items[sub].name
if subnets.items[sub].enabled:
sub_info[sub_name] = {
'gateway': subnets.items[sub].gateway,
'mtu': subnets.items[sub].mtu,
'vlan': subnets.items[sub].vlan,
'prefix': subnets.items[sub].prefix,
'services': subnets.items[sub].services,
}
sub_info[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name
sub_info[sub_name]['interfaces'] = []
for iface in range(0, len(subnets.items[sub].interfaces)):
sub_info[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name})
return sub_info
def generate_lag_dict(blade):
lag_info = {}
groups = blade.link_aggregation_groups.list_link_aggregation_groups()
for groupcnt in range(0, len(groups.items)):
lag_name = groups.items[groupcnt].name
lag_info[lag_name] = {
'lag_speed': groups.items[groupcnt].lag_speed,
'port_speed': groups.items[groupcnt].port_speed,
'status': groups.items[groupcnt].status,
}
lag_info[lag_name]['ports'] = []
for port in range(0, len(groups.items[groupcnt].ports)):
lag_info[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name})
return lag_info
def generate_network_dict(blade):
net_info = {}
ports = blade.network_interfaces.list_network_interfaces()
for portcnt in range(0, len(ports.items)):
int_name = ports.items[portcnt].name
if ports.items[portcnt].enabled:
net_info[int_name] = {
'type': ports.items[portcnt].type,
'mtu': ports.items[portcnt].mtu,
'vlan': ports.items[portcnt].vlan,
'address': ports.items[portcnt].address,
'services': ports.items[portcnt].services,
'gateway': ports.items[portcnt].gateway,
'netmask': ports.items[portcnt].netmask,
}
return net_info
def generate_capacity_dict(blade):
capacity_info = {}
total_cap = blade.arrays.list_arrays_space()
file_cap = blade.arrays.list_arrays_space(type='file-system')
object_cap = blade.arrays.list_arrays_space(type='object-store')
capacity_info['total'] = total_cap.items[0].capacity
capacity_info['aggregate'] = {
'data_reduction': total_cap.items[0].space.data_reduction,
'snapshots': total_cap.items[0].space.snapshots,
'total_physical': total_cap.items[0].space.total_physical,
'unique': total_cap.items[0].space.unique,
'virtual': total_cap.items[0].space.virtual,
}
capacity_info['file-system'] = {
'data_reduction': file_cap.items[0].space.data_reduction,
'snapshots': file_cap.items[0].space.snapshots,
'total_physical': file_cap.items[0].space.total_physical,
'unique': file_cap.items[0].space.unique,
'virtual': file_cap.items[0].space.virtual,
}
capacity_info['object-store'] = {
'data_reduction': object_cap.items[0].space.data_reduction,
'snapshots': object_cap.items[0].space.snapshots,
'total_physical': object_cap.items[0].space.total_physical,
'unique': object_cap.items[0].space.unique,
'virtual': object_cap.items[0].space.virtual,
}
return capacity_info
def generate_snap_dict(blade):
snap_info = {}
snaps = blade.file_system_snapshots.list_file_system_snapshots()
for snap in range(0, len(snaps.items)):
snapshot = snaps.items[snap].name
snap_info[snapshot] = {
'destroyed': snaps.items[snap].destroyed,
'source': snaps.items[snap].source,
'suffix': snaps.items[snap].suffix,
'source_destroyed': snaps.items[snap].source_destroyed,
}
return snap_info
def generate_fs_dict(blade):
fs_info = {}
fsys = blade.file_systems.list_file_systems()
for fsystem in range(0, len(fsys.items)):
share = fsys.items[fsystem].name
fs_info[share] = {
'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
'provisioned': fsys.items[fsystem].provisioned,
'destroyed': fsys.items[fsystem].destroyed,
}
if fsys.items[fsystem].http.enabled:
fs_info[share]['http'] = fsys.items[fsystem].http.enabled
if fsys.items[fsystem].smb.enabled:
fs_info[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
if fsys.items[fsystem].nfs.enabled:
fs_info[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
api_version = blade.api_version.list_versions().versions
if HARD_LIMIT_API_VERSION in api_version:
fs_info[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
return fs_info
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(dict(
gather_subset=dict(default='minimum', type='list',)
))
module = AnsibleModule(argument_spec, supports_check_mode=True)
blade = get_blade(module)
versions = blade.api_version.list_versions().versions
if MIN_REQUIRED_API_VERSION not in versions:
module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
subset = [test.lower() for test in module.params['gather_subset']]
valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
'network', 'subnets', 'lags',
'filesystems', 'snapshots')
subset_test = (test in valid_subsets for test in subset)
if not all(subset_test):
module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
% (",".join(valid_subsets), ",".join(subset)))
info = {}
if 'minimum' in subset or 'all' in subset:
info['default'] = generate_default_dict(blade)
if 'performance' in subset or 'all' in subset:
info['performance'] = generate_perf_dict(blade)
if 'config' in subset or 'all' in subset:
info['config'] = generate_config_dict(blade)
if 'capacity' in subset or 'all' in subset:
info['capacity'] = generate_capacity_dict(blade)
if 'lags' in subset or 'all' in subset:
info['lag'] = generate_lag_dict(blade)
if 'network' in subset or 'all' in subset:
info['network'] = generate_network_dict(blade)
if 'subnets' in subset or 'all' in subset:
info['subnet'] = generate_subnet_dict(blade)
if 'filesystems' in subset or 'all' in subset:
info['filesystems'] = generate_fs_dict(blade)
if 'snapshots' in subset or 'all' in subset:
info['snapshots'] = generate_snap_dict(blade)
module.exit_json(changed=False, purefb_info=info)
if __name__ == '__main__':
main()
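A similar sketch for the FlashBlade module (connection details as in its EXAMPLES above): requesting just the filesystem and snapshot subsets limits collection to those two generators.

- name: collect filesystem and snapshot information only
  purefb_info:
    gather_subset:
      - filesystems
      - snapshots
    fb_url: 10.10.10.2
    api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
  register: blade_info

- name: show the filesystems reported by the FlashBlade
  debug:
    msg: "{{ blade_info['purefb_info']['filesystems'] }}"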