diff --git a/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py b/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py index 0cd3029749a..137934d4252 100644 --- a/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py +++ b/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py @@ -4,808 +4,907 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +__metaclass__ = type +ANSIBLE_METADATA = {"metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community"} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: netapp_e_storagepool -short_description: NetApp E-Series manage disk groups and disk pools +short_description: NetApp E-Series manage volume groups and disk pools +description: Create or remove volume groups and disk pools for NetApp E-series storage arrays. version_added: '2.2' -description: - - Create or remove disk groups and disk pools for NetApp E-series storage arrays. +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) extends_documentation_fragment: - - netapp.eseries + - netapp.eseries options: state: - required: true description: - - Whether the specified storage pool should exist or not. - - Note that removing a storage pool currently requires the removal of all defined volumes first. - choices: ['present', 'absent'] - name: + - Whether the specified storage pool should exist or not. + - Note that removing a storage pool currently requires the removal of all defined volumes first. required: true + choices: ["present", "absent"] + name: description: - - The name of the storage pool to manage + - The name of the storage pool to manage + required: true criteria_drive_count: description: - - The number of disks to use for building the storage pool. The pool will be expanded if this number exceeds the number of disks already in place + - The number of disks to use for building the storage pool. + - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified. + - The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below) + required: false + type: int + criteria_min_usable_capacity: + description: + - The minimum size of the storage pool (in size_unit). + - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified. + - The pool will be expanded if this value exceeds its current size. (See expansion note below) + required: false + type: float criteria_drive_type: description: - - The type of disk (hdd or ssd) to use when searching for candidates to use. - choices: ['hdd','ssd'] + - The type of disk (hdd or ssd) to use when searching for candidates to use. + - When not specified each drive type will be evaluated until successful drive candidates are found starting with + the most prevalent drive type. 
+    required: false
+    choices: ["hdd","ssd"]
   criteria_size_unit:
     description:
-      - The unit used to interpret size parameters
-    choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
-    default: 'gb'
+      - The unit used to interpret size parameters.
+    choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
+    default: "gb"
   criteria_drive_min_size:
     description:
-      - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
-  criteria_min_usable_capacity:
-    description:
-      - The minimum size of the storage pool (in size_unit). The pool will be expanded if this value exceeds itscurrent size.
+      - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
   criteria_drive_interface_type:
     description:
-      - The interface type to use when selecting drives for the storage pool (no value means all interface types will be considered)
-    choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
+      - The interface type to use when selecting drives for the storage pool.
+      - If not provided then all interface types will be considered.
+    choices: ["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"]
+    required: false
+  criteria_drive_require_da:
+    description:
+      - Ensures the storage pool will be created with only data assurance (DA) capable drives.
+      - Only available for new storage pools; existing storage pools cannot be converted.
+    default: false
+    type: bool
+    version_added: '2.9'
   criteria_drive_require_fde:
     description:
-      - Whether full disk encryption ability is required for drives to be added to the storage pool
+      - Whether full disk encryption ability is required for drives to be added to the storage pool
+    default: false
     type: bool
   raid_level:
-    required: true
-    choices: ['raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']
     description:
-      - "Only required when the requested state is 'present'. The RAID level of the storage pool to be created."
-  erase_secured_drives:
+      - The RAID level of the storage pool to be created.
+      - Required only when I(state=="present").
+      - When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10) or I(criteria_drive_count >= 11) is
+        required, depending on the storage array specifications.
+      - When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required.
+      - When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required (the drive count must be even).
+      - When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required.
+      - When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required.
+      - When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required.
+      - Note that raidAll will be treated as raidDiskPool and raid3 as raid5.
     required: false
-    type: bool
-    description:
-      - Whether to erase secured disks before adding to storage pool
+    choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"]
+    default: "raidDiskPool"
   secure_pool:
+    description:
+      - Enables the security at rest feature on the storage pool.
+      - Will only work if all drives in the pool are security capable (FDE, FIPS, or a mix).
+      - Warning, once security is enabled, it cannot be disabled without erasing the drives.
     required: false
     type: bool
-    description:
-      - Whether to convert to a secure storage pool. Will only work if all drives in the pool are security capable.
   reserve_drive_count:
-    required: false
     description:
-      - Set the number of drives reserved by the storage pool for reconstruction operations. Only valide on raid disk pools.
-  remove_volumes:
+      - Set the number of drives reserved by the storage pool for reconstruction operations.
+      - Only valid on raid disk pools.
     required: false
-    default: False
+  remove_volumes:
     description:
     - Prior to removing a storage pool, delete all volumes in the pool.
+    default: true
+  erase_secured_drives:
+    description:
+      - If I(state=="absent") then all storage pool drives will be erased.
+      - If I(state=="present") then delete all available storage array drives that have security enabled.
+    default: true
     type: bool
-author: Kevin Hulquest (@hulquest)
-
-'''
-EXAMPLES = '''
-    - name: No disk groups
-      netapp_e_storagepool:
-        ssid: "{{ ssid }}"
-        name: "{{ item }}"
-        state: absent
-        api_url: "{{ netapp_api_url }}"
-        api_username: "{{ netapp_api_username }}"
-        api_password: "{{ netapp_api_password }}"
-        validate_certs: "{{ netapp_api_validate_certs }}"
-'''
-RETURN = '''
+notes:
+  - The expansion operations are non-blocking due to the time-consuming nature of expanding volume groups.
+  - Expansion of traditional volume groups (raid0, raid1, raid5, raid6) is performed in steps dictated by the storage
+    array. Each required step will be attempted until the request fails, which is likely because of the required
+    expansion time.
+  - raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5.
+"""
+EXAMPLES = """
+- name: No disk groups
+  netapp_e_storagepool:
+    ssid: "{{ ssid }}"
+    name: "{{ item }}"
+    state: absent
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    validate_certs: "{{ netapp_api_validate_certs }}"
+"""
+RETURN = """
 msg:
     description: Success message
     returned: success
     type: str
     sample: Json facts for the pool that was created.
-'''
+"""

-import json
-import logging
-from traceback import format_exc
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.netapp import request, eseries_host_argument_spec
+import functools
+from itertools import groupby
+from time import sleep
+from pprint import pformat
+from ansible.module_utils.netapp import NetAppESeriesModule
 from ansible.module_utils._text import to_native


-def select(predicate, iterable):
-    # python 2, 3 generic filtering.
-    if predicate is None:
-        predicate = bool
-    for x in iterable:
-        if predicate(x):
-            yield x
+def get_most_common_elements(iterator):
+    """Return a list of (element, count) tuples ordered by descending frequency."""
+    if not isinstance(iterator, list):
+        raise TypeError("iterator must be a list.")
+
+    grouped = [(key, len(list(group))) for key, group in groupby(sorted(iterator))]
+    return sorted(grouped, key=lambda x: x[1], reverse=True)
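# A quick, illustrative check of the helper above (made-up values): the most
# prevalent element comes first, so callers can try the most common drive
# type before the rarer ones.
#
#     >>> get_most_common_elements(["hdd", "ssd", "hdd", "hdd", "ssd"])
#     [('hdd', 3), ('ssd', 2)]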
-def _identity(obj):
-    return obj


+def memoize(func):
+    """Generic memoizer for any function with any number of arguments including zero."""

-class GroupBy(object):
-    # python 2, 3 generic grouping.
-    def __init__(self, iterable, key=None):
-        self.keyfunc = key if key else _identity
-        self.it = iter(iterable)
-        self.tgtkey = self.currkey = self.currvalue = object()
+    cache = dict()

-    def __iter__(self):
-        return self
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        # Key on the stringified arguments so results persist across calls;
+        # zero-argument calls share a single sentinel key.
+        key = str((args, kwargs)) if args or kwargs else "no_argument_response"
+        if key not in cache:
+            cache[key] = func(*args, **kwargs)
+        return cache[key]

-    def next(self):
-        while self.currkey == self.tgtkey:
-            self.currvalue = next(self.it)  # Exit on StopIteration
-            self.currkey = self.keyfunc(self.currvalue)
-        self.tgtkey = self.currkey
-        return (self.currkey, self._grouper(self.tgtkey))
+    return wrapper

-    def _grouper(self, tgtkey):
-        while self.currkey == tgtkey:
-            yield self.currvalue
-            try:
-                self.currvalue = next(self.it)  # Exit on StopIteration
-            except StopIteration:
-                return
-            self.currkey = self.keyfunc(self.currvalue)


+class NetAppESeriesStoragePool(NetAppESeriesModule):
+    EXPANSION_TIMEOUT_SEC = 10
+    DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11

-class NetAppESeriesStoragePool(object):
     def __init__(self):
-        self._sp_drives_cached = None
-
-        self._size_unit_map = dict(
-            bytes=1,
-            b=1,
-            kb=1024,
-            mb=1024 ** 2,
-            gb=1024 ** 3,
-            tb=1024 ** 4,
-            pb=1024 ** 5,
-            eb=1024 ** 6,
-            zb=1024 ** 7,
-            yb=1024 ** 8
-        )
-
-        argument_spec = eseries_host_argument_spec()
-        argument_spec.update(dict(
-            api_url=dict(type='str', required=True),
-            state=dict(required=True, choices=['present', 'absent'], type='str'),
-            name=dict(required=True, type='str'),
-            criteria_size_unit=dict(default='gb', type='str'),
-            criteria_drive_count=dict(type='int'),
-            criteria_drive_interface_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
-                                               type='str'),
-            criteria_drive_type=dict(choices=['ssd', 'hdd'], type='str'),
-            criteria_drive_min_size=dict(type='int'),
-            criteria_drive_require_fde=dict(type='bool'),
-            criteria_min_usable_capacity=dict(type='int'),
-            raid_level=dict(
-                choices=['raidUnsupported', 'raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']),
-            erase_secured_drives=dict(type='bool'),
-            log_path=dict(type='str'),
-            remove_drives=dict(type='list'),
-            secure_pool=dict(type='bool', default=False),
-            reserve_drive_count=dict(type='int'),
-            remove_volumes=dict(type='bool', default=False)
-        ))
-
-        self.module = AnsibleModule(
-            argument_spec=argument_spec,
-            required_if=[
-                ('state', 'present', ['raid_level'])
-            ],
-            mutually_exclusive=[
-
-            ],
-            # TODO: update validation for various selection criteria
-            supports_check_mode=True
-        )
-
-        p = self.module.params
-
-        log_path = p['log_path']
-
-        # logging setup
-        self._logger = logging.getLogger(self.__class__.__name__)
-        self.debug = self._logger.debug
-
-        if log_path:
-            logging.basicConfig(level=logging.DEBUG, filename=log_path)
-
-        self.state = p['state']
-        self.ssid = p['ssid']
-        self.name = p['name']
-        self.validate_certs = p['validate_certs']
-
-        self.criteria_drive_count = p['criteria_drive_count']
-        self.criteria_drive_type = p['criteria_drive_type']
-        self.criteria_size_unit = p['criteria_size_unit']
-        self.criteria_drive_min_size = p['criteria_drive_min_size']
-        self.criteria_min_usable_capacity = p['criteria_min_usable_capacity']
-        self.criteria_drive_interface_type = p['criteria_drive_interface_type']
-        self.criteria_drive_require_fde = p['criteria_drive_require_fde']
-
-        self.raid_level = p['raid_level']
-        self.erase_secured_drives = p['erase_secured_drives']
- self.remove_drives = p['remove_drives'] - self.secure_pool = p['secure_pool'] - self.reserve_drive_count = p['reserve_drive_count'] - self.remove_volumes = p['remove_volumes'] + version = "02.00.0000.0000" + ansible_options = dict( + state=dict(required=True, choices=["present", "absent"], type="str"), + name=dict(required=True, type="str"), + criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"], + default="gb", type="str"), + criteria_drive_count=dict(type="int"), + criteria_drive_interface_type=dict(choices=["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"], + type="str"), + criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False), + criteria_drive_min_size=dict(type="float"), + criteria_drive_require_da=dict(type="bool", required=False), + criteria_drive_require_fde=dict(type="bool", required=False), + criteria_min_usable_capacity=dict(type="float"), + raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"], + default="raidDiskPool"), + erase_secured_drives=dict(type="bool", default=True), + secure_pool=dict(type="bool", default=False), + reserve_drive_count=dict(type="int"), + remove_volumes=dict(type="bool", default=True)) + + required_if = [["state", "present", ["raid_level"]]] + super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options, + web_services_version=version, + supports_check_mode=True, + required_if=required_if) + + args = self.module.params + self.state = args["state"] + self.ssid = args["ssid"] + self.name = args["name"] + self.criteria_drive_count = args["criteria_drive_count"] + self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"] + self.criteria_size_unit = args["criteria_size_unit"] + self.criteria_drive_min_size = args["criteria_drive_min_size"] + self.criteria_drive_type = args["criteria_drive_type"] + self.criteria_drive_interface_type = args["criteria_drive_interface_type"] + self.criteria_drive_require_fde = args["criteria_drive_require_fde"] + self.criteria_drive_require_da = args["criteria_drive_require_da"] + self.raid_level = args["raid_level"] + self.erase_secured_drives = args["erase_secured_drives"] + self.secure_pool = args["secure_pool"] + self.reserve_drive_count = args["reserve_drive_count"] + self.remove_volumes = args["remove_volumes"] + self.pool_detail = None + + # Change all sizes to be measured in bytes + if self.criteria_min_usable_capacity: + self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity * + self.SIZE_UNIT_MAP[self.criteria_size_unit]) + if self.criteria_drive_min_size: + self.criteria_drive_min_size = int(self.criteria_drive_min_size * + self.SIZE_UNIT_MAP[self.criteria_size_unit]) + self.criteria_size_unit = "bytes" + + # Adjust unused raid level option to reflect documentation + if self.raid_level == "raidAll": + self.raid_level = "raidDiskPool" + if self.raid_level == "raid3": + self.raid_level = "raid5" - try: - self.api_usr = p['api_username'] - self.api_pwd = p['api_password'] - self.api_url = p['api_url'] - except KeyError: - self.module.fail_json(msg="You must pass in api_username " - "and api_password and api_url to the module.") - - self.post_headers = dict(Accept="application/json") - self.post_headers['Content-Type'] = 'application/json' - - # Quick and dirty drive selector, since the one provided by web service proxy is broken for min_disk_size as of 2016-03-12. 
- # Doesn't really need to be a class once this is in module_utils or retired- just groups everything together so we - # can copy/paste to other modules more easily. - # Filters all disks by specified criteria, then groups remaining disks by capacity, interface and disk type, and selects - # the first set that matches the specified count and/or aggregate capacity. - # class DriveSelector(object): - def filter_drives( - self, - drives, # raw drives resp - interface_type=None, # sas, sata, fibre, etc - drive_type=None, # ssd/hdd - spindle_speed=None, # 7200, 10000, 15000, ssd (=0) - min_drive_size=None, - max_drive_size=None, - fde_required=None, - size_unit='gb', - min_total_capacity=None, - min_drive_count=None, - exact_drive_count=None, - raid_level=None - ): - if min_total_capacity is None and exact_drive_count is None: - raise Exception("One of criteria_min_total_capacity or criteria_drive_count must be specified.") - - if min_total_capacity: - min_total_capacity = min_total_capacity * self._size_unit_map[size_unit] - - # filter clearly invalid/unavailable drives first - drives = select(self._is_valid_drive, drives) - - if interface_type: - drives = select(lambda d: d['phyDriveType'] == interface_type, drives) - - if drive_type: - drives = select(lambda d: d['driveMediaType'] == drive_type, drives) - - if spindle_speed is not None: # 0 is valid for ssds - drives = select(lambda d: d['spindleSpeed'] == spindle_speed, drives) - - if min_drive_size: - min_drive_size_bytes = min_drive_size * self._size_unit_map[size_unit] - drives = select(lambda d: int(d['rawCapacity']) >= min_drive_size_bytes, drives) - - if max_drive_size: - max_drive_size_bytes = max_drive_size * self._size_unit_map[size_unit] - drives = select(lambda d: int(d['rawCapacity']) <= max_drive_size_bytes, drives) - - if fde_required: - drives = select(lambda d: d['fdeCapable'], drives) - - # initial implementation doesn't have a preference for any of these values... 
- # just return the first set we find that matches the requested disk count and/or minimum total capacity - for (cur_capacity, drives_by_capacity) in GroupBy(drives, lambda d: int(d['rawCapacity'])): - for (cur_interface_type, drives_by_interface_type) in GroupBy(drives_by_capacity, - lambda d: d['phyDriveType']): - for (cur_drive_type, drives_by_drive_type) in GroupBy(drives_by_interface_type, - lambda d: d['driveMediaType']): - # listify so we can consume more than once - drives_by_drive_type = list(drives_by_drive_type) - candidate_set = list() # reset candidate list on each iteration of the innermost loop - - if exact_drive_count: - if len(drives_by_drive_type) < exact_drive_count: - continue # we know this set is too small, move on - - for drive in drives_by_drive_type: - candidate_set.append(drive) - if self._candidate_set_passes(candidate_set, min_capacity_bytes=min_total_capacity, - min_drive_count=min_drive_count, - exact_drive_count=exact_drive_count, raid_level=raid_level): - return candidate_set - - raise Exception("couldn't find an available set of disks to match specified criteria") - - def _is_valid_drive(self, d): - is_valid = d['available'] \ - and d['status'] == 'optimal' \ - and not d['pfa'] \ - and not d['removed'] \ - and not d['uncertified'] \ - and not d['invalidDriveData'] \ - and not d['nonRedundantAccess'] - - return is_valid - - def _candidate_set_passes(self, candidate_set, min_capacity_bytes=None, min_drive_count=None, - exact_drive_count=None, raid_level=None): - if not self._is_drive_count_valid(len(candidate_set), min_drive_count=min_drive_count, - exact_drive_count=exact_drive_count, raid_level=raid_level): - return False - # TODO: this assumes candidate_set is all the same size- if we want to allow wastage, need to update to use min size of set - if min_capacity_bytes is not None and self._calculate_usable_capacity(int(candidate_set[0]['rawCapacity']), - len(candidate_set), - raid_level=raid_level) < min_capacity_bytes: - return False + @property + @memoize + def available_drives(self): + """Determine the list of available drives""" + return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"] - return True - - def _calculate_usable_capacity(self, disk_size_bytes, disk_count, raid_level=None): - if raid_level in [None, 'raid0']: - return disk_size_bytes * disk_count - if raid_level == 'raid1': - return (disk_size_bytes * disk_count) // 2 - if raid_level in ['raid3', 'raid5']: - return (disk_size_bytes * disk_count) - disk_size_bytes - if raid_level in ['raid6', 'raidDiskPool']: - return (disk_size_bytes * disk_count) - (disk_size_bytes * 2) - raise Exception("unsupported raid_level: %s" % raid_level) - - def _is_drive_count_valid(self, drive_count, min_drive_count=0, exact_drive_count=None, raid_level=None): - if exact_drive_count and exact_drive_count != drive_count: - return False - if raid_level == 'raidDiskPool': - if drive_count < 11: - return False - if raid_level == 'raid1': - if drive_count % 2 != 0: - return False - if raid_level in ['raid3', 'raid5']: - if drive_count < 3: - return False - if raid_level == 'raid6': - if drive_count < 4: - return False - if min_drive_count and drive_count < min_drive_count: - return False + @property + @memoize + def available_drive_types(self): + """Determine the types of available drives sorted by the most common first.""" + types = [drive["driveMediaType"] for drive in self.drives] + return [entry[0] for entry in get_most_common_elements(types)] - return True + @property 
+    @memoize
+    def available_drive_interface_types(self):
+        """Determine the interface types of available drives, sorted by the most common first."""
+        interfaces = [drive["phyDriveType"] for drive in self.drives]
+        return [entry[0] for entry in get_most_common_elements(interfaces)]

-    def get_storage_pool(self, storage_pool_name):
-        # global ifilter
-        self.debug("fetching storage pools")
-        # map the storage pool name to its id
-        try:
-            (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
-                                 headers=dict(Accept="application/json"), url_username=self.api_usr,
-                                 url_password=self.api_pwd, validate_certs=self.validate_certs)
-        except Exception as err:
-            rc = err.args[0]
-            if rc == 404 and self.state == 'absent':
-                self.module.exit_json(
-                    msg="Storage pool [%s] did not exist." % (self.name))
-            else:
-                self.module.exit_json(
-                    msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
-                        (self.ssid, to_native(err), self.state, rc))
+    @property
+    def storage_pool_drives(self, exclude_hotspares=True):
+        """Retrieve list of drives found in storage pool."""
+        if exclude_hotspares:
+            return [drive for drive in self.drives
+                    if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]]
+
+        return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"]]

-        self.debug("searching for storage pool '%s'", storage_pool_name)
+    @property
+    def expandable_drive_count(self):
+        """Maximum number of drives that a storage pool can be expanded by at a given time."""
+        capabilities = None
+        if self.raid_level == "raidDiskPool":
+            return len(self.available_drives)
+
+        try:
+            rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(error)))

-        pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)
+        return capabilities["featureParameters"]["maxDCEDrives"]

-        if pool_detail:
-            found = 'found'
+    @property
+    def disk_pool_drive_minimum(self):
+        """Provide the storage array's minimum disk pool drive count."""
+        rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True)
+
+        # Standard minimum is 11 drives but some arrays allow 10 drives; 11 will be the default
+        if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
+                attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
+            return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT
+
+        return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
+
+    def get_available_drive_capacities(self, drive_id_list=None):
+        """Determine the list of available drive capacities."""
+        if drive_id_list:
+            available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+                                              if drive["id"] in drive_id_list and drive["available"] and
+                                              drive["status"] == "optimal"])
         else:
-            found = 'not found'
-        self.debug(found)
-
-        return pool_detail
-
-    def get_candidate_disks(self):
-        self.debug("getting candidate disks...")
-
-        # driveCapacityMin is broken on /drives POST.
Per NetApp request we built our own - # switch back to commented code below if it gets fixed - # drives_req = dict( - # driveCount = self.criteria_drive_count, - # sizeUnit = 'mb', - # raidLevel = self.raid_level - # ) - # - # if self.criteria_drive_type: - # drives_req['driveType'] = self.criteria_drive_type - # if self.criteria_disk_min_aggregate_size_mb: - # drives_req['targetUsableCapacity'] = self.criteria_disk_min_aggregate_size_mb - # - # # TODO: this arg appears to be ignored, uncomment if it isn't - # #if self.criteria_disk_min_size_gb: - # # drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024 - # (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers, - # method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs) - # - # if rc == 204: - # self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool') - - # disk_ids = [d['id'] for d in drives_resp] + available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives + if drive["available"] and drive["status"] == "optimal"]) - try: - (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET', - url_username=self.api_usr, url_password=self.api_pwd, - validate_certs=self.validate_certs) - except Exception as err: - self.module.exit_json( - msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." % (self.ssid, to_native(err))) + self.module.log("available drive capacities: %s" % available_drive_capacities) + return list(available_drive_capacities) + @property + def drives(self): + """Retrieve list of drives found in storage pool.""" + drives = None try: - candidate_set = self.filter_drives(drives_resp, - exact_drive_count=self.criteria_drive_count, - drive_type=self.criteria_drive_type, - min_drive_size=self.criteria_drive_min_size, - raid_level=self.raid_level, - size_unit=self.criteria_size_unit, - min_total_capacity=self.criteria_min_usable_capacity, - interface_type=self.criteria_drive_interface_type, - fde_required=self.criteria_drive_require_fde - ) - except Exception as err: - self.module.fail_json( - msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + rc, drives = self.request("storage-systems/%s/drives" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." 
+                                      % (self.ssid, to_native(error)))

-        disk_ids = [d['id'] for d in candidate_set]
+        return drives

-        return disk_ids
+    def is_drive_count_valid(self, drive_count):
+        """Validate drive count criteria is met."""
+        if self.criteria_drive_count and drive_count < self.criteria_drive_count:
+            return False

-    def create_storage_pool(self):
-        self.debug("creating storage pool...")
+        if self.raid_level == "raidDiskPool":
+            return drive_count >= self.disk_pool_drive_minimum
+        if self.raid_level == "raid0":
+            return drive_count > 0
+        if self.raid_level == "raid1":
+            return drive_count >= 2 and (drive_count % 2) == 0
+        if self.raid_level in ["raid3", "raid5"]:
+            return 3 <= drive_count <= 30
+        if self.raid_level == "raid6":
+            return 5 <= drive_count <= 30
+        return False

-        sp_add_req = dict(
-            raidLevel=self.raid_level,
-            diskDriveIds=self.disk_ids,
-            name=self.name
-        )
+    @property
+    def storage_pool(self):
+        """Retrieve storage pool information."""
+        storage_pools_resp = None
+        try:
+            rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+                                      % (self.ssid, to_native(err), self.state))

-        if self.erase_secured_drives:
-            sp_add_req['eraseSecuredDrives'] = self.erase_secured_drives
+        pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
+        return pool_detail[0] if pool_detail else dict()

+    @property
+    def storage_pool_volumes(self):
+        """Retrieve list of volumes associated with storage pool."""
+        volumes_resp = None
         try:
-            (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
-                                 data=json.dumps(sp_add_req), headers=self.post_headers, method='POST',
-                                 url_username=self.api_usr, url_password=self.api_pwd,
-                                 validate_certs=self.validate_certs,
-                                 timeout=120)
+            rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
         except Exception as err:
-            pool_id = self.pool_detail['id']
-            self.module.exit_json(
-                msg="Failed to create storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
-                                                                                                self.ssid,
-                                                                                                to_native(err)))
+            self.module.fail_json(msg="Failed to get volumes. Array id [%s]. Error[%s]. State[%s]."
+                                      % (self.ssid, to_native(err), self.state))
+
+        group_ref = self.storage_pool["volumeGroupRef"]
+        storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref]
+        return storage_pool_volume_list
+
+    def get_ddp_capacity(self, expansion_drive_list):
+        """Return the total usable capacity based on the additional drives."""
+
+        def get_ddp_error_percent(_drive_count, _extent_count):
+            """Determine the space reserved for reconstruction"""
+            if _drive_count <= 36:
+                if _extent_count <= 600:
+                    return 0.40
+                elif _extent_count <= 1400:
+                    return 0.35
+                elif _extent_count <= 6200:
+                    return 0.20
+                elif _extent_count <= 50000:
+                    return 0.15
+            elif _drive_count <= 64:
+                if _extent_count <= 600:
+                    return 0.20
+                elif _extent_count <= 1400:
+                    return 0.15
+                elif _extent_count <= 6200:
+                    return 0.10
+                elif _extent_count <= 50000:
+                    return 0.05
+            elif _drive_count <= 480:
+                if _extent_count <= 600:
+                    return 0.20
+                elif _extent_count <= 1400:
+                    return 0.15
+                elif _extent_count <= 6200:
+                    return 0.10
+                elif _extent_count <= 50000:
+                    return 0.05
+
+            self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid)
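# The tiered conditionals above can be hard to eyeball, so here is an
# equivalent, standalone sketch of the lookup (same tiers; illustrative only,
# not used by the module):
#
#     DDP_ERROR_PERCENT_TIERS = [
#         (36, ((600, 0.40), (1400, 0.35), (6200, 0.20), (50000, 0.15))),
#         (64, ((600, 0.20), (1400, 0.15), (6200, 0.10), (50000, 0.05))),
#         (480, ((600, 0.20), (1400, 0.15), (6200, 0.10), (50000, 0.05)))]
#
#     def lookup_ddp_error_percent(drive_count, extent_count):
#         for max_drives, bands in DDP_ERROR_PERCENT_TIERS:
#             if drive_count <= max_drives:
#                 for max_extents, percent in bands:
#                     if extent_count <= max_extents:
#                         return percent
#                 return None  # extent count beyond the documented bands
#         return None  # drive count beyond the documented table
#
#     assert lookup_ddp_error_percent(30, 900) == 0.35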
+        def get_ddp_reserved_drive_count(_disk_count):
+            """Determine the number of reserved drives."""
+            reserve_count = 0
+
+            if self.reserve_drive_count:
+                reserve_count = self.reserve_drive_count
+            elif _disk_count >= 256:
+                reserve_count = 8
+            elif _disk_count >= 192:
+                reserve_count = 7
+            elif _disk_count >= 128:
+                reserve_count = 6
+            elif _disk_count >= 64:
+                reserve_count = 4
+            elif _disk_count >= 32:
+                reserve_count = 3
+            elif _disk_count >= 12:
+                reserve_count = 2
+            elif _disk_count == 11:
+                reserve_count = 1
+
+            return reserve_count

-        self.pool_detail = self.get_storage_pool(self.name)
+        if self.pool_detail:
+            drive_count = len(self.storage_pool_drives) + len(expansion_drive_list)
+        else:
+            drive_count = len(expansion_drive_list)
+
+        drive_usable_capacity = min(min(self.get_available_drive_capacities()),
+                                    min(self.get_available_drive_capacities(expansion_drive_list)))
+        drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912)
+        maximum_stripe_count = (drive_count * drive_data_extents) / 10
+
+        error_percent = get_ddp_error_percent(drive_count, drive_data_extents)
+        error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10
+
+        total_stripe_count = maximum_stripe_count - error_overhead
+        stripe_count_per_drive = total_stripe_count / drive_count
+        reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive
+        available_stripe_count = total_stripe_count - reserved_stripe_count
+
+        return available_stripe_count * 4294967296
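# Worked example of the arithmetic above, with made-up inputs: twelve drives
# of 4398046511104 usable bytes each give drive_data_extents ~= 8177,
# maximum_stripe_count ~= 9812, error_percent = 0.15 (12 <= 36 drives,
# 6200 < 8177 <= 50000 extents) and error_overhead ~= 148; a 12-drive pool
# reserves two drives' worth of stripes, leaving roughly 8053 stripes,
# i.e. about 34.6 TB usable. These figures are estimates only; the array's
# own accounting is authoritative.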
+    @memoize
+    def get_candidate_drives(self):
+        """Retrieve a set of candidate drives for creating a new storage pool."""
+
+        def get_candidate_drive_request():
+            """Perform request for new volume creation."""
+            candidates_list = list()
+            drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types
+            interface_types = [self.criteria_drive_interface_type] \
+                if self.criteria_drive_interface_type else self.available_drive_interface_types
+
+            for interface_type in interface_types:
+                for drive_type in drive_types:
+                    candidates = None
+                    volume_candidate_request_data = dict(
+                        type="diskPool" if self.raid_level == "raidDiskPool" else "traditional",
+                        diskPoolVolumeCandidateRequestData=dict(
+                            reconstructionReservedDriveCount=65535))
+                    candidate_selection_type = dict(
+                        candidateSelectionType="count",
+                        driveRefList=dict(driveRef=self.available_drives))
+                    criteria = dict(raidLevel=self.raid_level,
+                                    phyDriveType=interface_type,
+                                    dssPreallocEnabled=False,
+                                    securityType="capable" if self.criteria_drive_require_fde else "none",
+                                    driveMediaType=drive_type,
+                                    onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False,
+                                    volumeCandidateRequestData=volume_candidate_request_data,
+                                    allocateReserveSpace=False,
+                                    securityLevel="fde" if self.criteria_drive_require_fde else "none",
+                                    candidateSelectionType=candidate_selection_type)
+
+                    try:
+                        rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError"
+                                                      "Response=true" % self.ssid, data=criteria, method="POST")
+                    except Exception as error:
+                        self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
+                                                  % (self.ssid, to_native(error)))
+
+                    if candidates:
+                        candidates_list.extend(candidates["volumeCandidate"])
+
+            return candidates_list
+
+        # Determine the appropriate candidate list
+        for candidate in get_candidate_drive_request():
+
+            # Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size
+            if self.criteria_drive_count:
+                if self.criteria_drive_count != int(candidate["driveCount"]):
+                    continue
+            if self.criteria_min_usable_capacity:
+                if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity >
+                     self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or
+                        self.criteria_min_usable_capacity > int(candidate["usableSize"])):
+                    continue
+            if self.criteria_drive_min_size:
+                if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])):
+                    continue
+
+            return candidate
+
+        self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
+
+    @memoize
+    def get_expansion_candidate_drives(self):
+        """Retrieve required expansion drive list.

-        if self.secure_pool:
-            secure_pool_data = dict(securePool=True)
-            try:
-                (retc, r) = request(
-                    self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
-                    data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
-                    url_username=self.api_usr,
-                    url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
-            except Exception as err:
-                pool_id = self.pool_detail['id']
-                self.module.exit_json(
-                    msg="Failed to update storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
-                                                                                                    self.ssid,
-                                                                                                    to_native(err)))
+        Note: To satisfy the expansion criteria each item in the candidate list must be added to the specified group
+        since there is a potential limitation on how many drives can be incorporated at a time.
+            * Traditional raid volume groups must be added two drives maximum at a time. No limits on raid disk pools.

-    @property
-    def needs_raid_level_migration(self):
-        current_raid_level = self.pool_detail['raidLevel']
-        needs_migration = self.raid_level != current_raid_level
+        :return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint
+        """

-        if needs_migration:  # sanity check some things so we can fail early/check-mode
-            if current_raid_level == 'raidDiskPool':
-                self.module.fail_json(msg="raid level cannot be changed for disk pools")
+        def get_expansion_candidate_drive_request():
+            """Perform the request for expanding existing volume groups or disk pools.

-        return needs_migration
+            Note: the list of candidate structures does not necessarily produce candidates that meet all criteria.
+ """ + candidates_list = None + url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid - def migrate_raid_level(self): - self.debug("migrating storage pool to raid level '%s'...", self.raid_level) - sp_raid_migrate_req = dict( - raidLevel=self.raid_level - ) - try: - (rc, resp) = request( - self.api_url + "/storage-systems/%s/storage-pools/%s/raid-type-migration" % (self.ssid, - self.name), - data=json.dumps(sp_raid_migrate_req), headers=self.post_headers, method='POST', - url_username=self.api_usr, - url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) - except Exception as err: - pool_id = self.pool_detail['id'] - self.module.exit_json( - msg="Failed to change the raid level of storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % ( - pool_id, self.ssid, to_native(err))) + try: + rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"]) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]." + % (self.ssid, to_native(error))) - @property - def sp_drives(self, exclude_hotspares=True): - if not self._sp_drives_cached: + return candidates_list["candidates"] - self.debug("fetching drive list...") - try: - (rc, resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET', - url_username=self.api_usr, url_password=self.api_pwd, - validate_certs=self.validate_certs) - except Exception as err: - pool_id = self.pool_detail['id'] - self.module.exit_json( - msg="Failed to fetch disk drives. Pool id [%s]. Array id [%s]. Error[%s]." 
% (pool_id, self.ssid, to_native(err)))
-
-            sp_id = self.pool_detail['id']
-            if exclude_hotspares:
-                self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id and not d['hotSpare']]
+        required_candidate_list = list()
+        required_additional_drives = 0
+        required_additional_capacity = 0
+        total_required_capacity = 0
+
+        # Determine whether and how much expansion is needed to satisfy the specified criteria
+        if self.criteria_min_usable_capacity:
+            total_required_capacity = self.criteria_min_usable_capacity
+            required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"])
+
+        if self.criteria_drive_count:
+            required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives)
+
+        # Determine the appropriate expansion candidate list
+        if required_additional_drives > 0 or required_additional_capacity > 0:
+            for candidate in get_expansion_candidate_drive_request():
+
+                if self.criteria_drive_min_size:
+                    if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])):
+                        continue
+
+                if self.raid_level == "raidDiskPool":
+                    if (len(candidate["drives"]) >= required_additional_drives and
+                            self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity):
+                        required_candidate_list.append(candidate)
+                        break
+                else:
+                    required_additional_drives -= len(candidate["drives"])
+                    required_additional_capacity -= int(candidate["usableCapacity"])
+                    required_candidate_list.append(candidate)
+
+                # Determine if required drives and capacities are satisfied
+                if required_additional_drives <= 0 and required_additional_capacity <= 0:
+                    break
             else:
-                self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id]
+                self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)

-        return self._sp_drives_cached
+        return required_candidate_list
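# Hypothetical walk-through of the accumulation above: a raid6 group of 12
# drives with criteria_drive_count=16 still needs 4 drives. Traditional
# volume-group candidates arrive in small sets (typically two drives each),
# so candidates are collected until both the remaining drive count and any
# remaining capacity shortfall reach zero:
#
#     required_additional_drives = 16 - 12    # -> 4
#     # after two 2-drive candidates: 4 - 2 - 2 == 0, and the loop breaks
#
# A raidDiskPool instead takes the first single candidate that satisfies
# both the drive count and the DDP capacity requirement.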
-    @property
-    def reserved_drive_count_differs(self):
-        if int(self.pool_detail['volumeGroupData']['diskPoolData']['reconstructionReservedDriveCount']) != self.reserve_drive_count:
-            return True
-        return False
+    def get_reserve_drive_count(self):
+        """Retrieve the current number of reserve drives (only valid for raidDiskPool)."""

-    @property
-    def needs_expansion(self):
-        if self.criteria_drive_count > len(self.sp_drives):
-            return True
-        # TODO: is totalRaidedSpace the best attribute for "how big is this SP"?
-        if self.criteria_min_usable_capacity and \
-                (self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]) > int(self.pool_detail['totalRaidedSpace']):
-            return True
+        if not self.pool_detail:
+            self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid)

-        return False
+        if self.raid_level != "raidDiskPool":
+            self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
+                                      % (self.pool_detail["id"], self.ssid))

-    def get_expansion_candidate_drives(self):
-        # sanity checks; don't call this if we can't/don't need to expand
-        if not self.needs_expansion:
-            self.module.fail_json(msg="can't get expansion candidates when pool doesn't need expansion")
+        return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"]

-        self.debug("fetching expansion candidate drives...")
-        try:
-            (rc, resp) = request(
-                self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
-                                                                                self.pool_detail['id']),
-                method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
-                timeout=120)
-        except Exception as err:
-            pool_id = self.pool_detail['id']
-            self.module.exit_json(
-                msg="Failed to fetch candidate drives for storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
-                    pool_id, self.ssid, to_native(err)))
+    def get_maximum_reserve_drive_count(self):
+        """Retrieve the maximum number of reserve drives for storage pool (only valid for raidDiskPool)."""
+        if self.raid_level != "raidDiskPool":
+            self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
+                                      % (self.pool_detail["id"], self.ssid))

-        current_drive_count = len(self.sp_drives)
-        current_capacity_bytes = int(self.pool_detail['totalRaidedSpace'])  # TODO: is this the right attribute to use?
+        drives_ids = list()

-        if self.criteria_min_usable_capacity:
-            requested_capacity_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]
+        if self.pool_detail:
+            drives_ids.extend(self.storage_pool_drives)
+            for candidate in self.get_expansion_candidate_drives():
+                drives_ids.extend((candidate["drives"]))
         else:
-            requested_capacity_bytes = current_capacity_bytes
+            candidate = self.get_candidate_drives()
+            drives_ids.extend(candidate["driveRefList"]["driveRef"])

-        if self.criteria_drive_count:
-            minimum_disks_to_add = max((self.criteria_drive_count - current_drive_count), 1)
-        else:
-            minimum_disks_to_add = 1
+        drive_count = len(drives_ids)
+        maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10)
+        if maximum_reserve_drive_count > 10:
+            maximum_reserve_drive_count = 10
+
+        return maximum_reserve_drive_count
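# The ceiling above, restated as a standalone sketch (illustrative only):
#
#     def max_reserve_drives(drive_count):
#         ceiling = min(int(drive_count * 0.2 + 1), drive_count - 10)
#         return min(ceiling, 10)
#
#     assert max_reserve_drives(11) == 1    # smallest disk pool
#     assert max_reserve_drives(30) == 7    # int(30 * 0.2 + 1) == 7
#     assert max_reserve_drives(64) == 10   # capped at ten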
-        minimum_bytes_to_add = max(requested_capacity_bytes - current_capacity_bytes, 0)
+    def set_reserve_drive_count(self, check_mode=False):
+        """Set the reserve drive count for raidDiskPool."""
+        changed = False

-        # FUTURE: allow more control over expansion candidate selection?
-        # loop over candidate disk sets and add until we've met both criteria
+        if self.raid_level == "raidDiskPool" and self.reserve_drive_count:
+            maximum_count = self.get_maximum_reserve_drive_count()

-        added_drive_count = 0
-        added_capacity_bytes = 0
+            if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count:
+                self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. "
+                                          "Note that it may be necessary to wait for expansion operations to complete "
+                                          "before adjusting the reserve drive count. Maximum [%s]. Array [%s]."
+                                          % (maximum_count, self.ssid))

-        drives_to_add = set()
+            if self.reserve_drive_count != self.get_reserve_drive_count():
+                changed = True

-        for s in resp:
-            # don't trust the API not to give us duplicate drives across candidate sets, especially in multi-drive sets
-            candidate_drives = s['drives']
-            if len(drives_to_add.intersection(candidate_drives)) != 0:
-                # duplicate, skip
-                continue
-            drives_to_add.update(candidate_drives)
-            added_drive_count += len(candidate_drives)
-            added_capacity_bytes += int(s['usableCapacity'])
+                if not check_mode:
+                    try:
+                        rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid,
+                                                method="POST", data=dict(volumeGroupRef=self.pool_detail["id"],
+                                                                         newDriveCount=self.reserve_drive_count))
+                    except Exception as error:
+                        self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]."
+                                                  " Array [%s]. Error [%s]."
+                                                  % (self.pool_detail["id"], self.ssid, to_native(error)))

-            if added_drive_count >= minimum_disks_to_add and added_capacity_bytes >= minimum_bytes_to_add:
-                break
+        return changed

-        if (added_drive_count < minimum_disks_to_add) or (added_capacity_bytes < minimum_bytes_to_add):
-            self.module.fail_json(
-                msg="unable to find at least %s drives to add that would add at least %s bytes of capacity" % (
-                    minimum_disks_to_add, minimum_bytes_to_add))
+    def erase_all_available_secured_drives(self, check_mode=False):
+        """Erase all available drives that have the encryption-at-rest feature enabled."""
+        changed = False
+        drives_list = list()
+        for drive in self.drives:
+            if drive["available"] and drive["fdeEnabled"]:
+                changed = True
+                drives_list.append(drive["id"])

-        return list(drives_to_add)
+        if drives_list and not check_mode:
+            try:
+                rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
+                                        % self.ssid, method="POST", data=dict(driveRef=drives_list))
+            except Exception as error:
+                self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]. Error [%s]."
+                                          % (self.ssid, to_native(error)))

-    def expand_storage_pool(self):
-        drives_to_add = self.get_expansion_candidate_drives()
+        return changed

-        self.debug("adding %s drives to storage pool...", len(drives_to_add))
-        sp_expand_req = dict(
-            drives=drives_to_add
-        )
-        try:
-            request(
-                self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
-                                                                                self.pool_detail['id']),
-                data=json.dumps(sp_expand_req), headers=self.post_headers, method='POST', url_username=self.api_usr,
-                url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
-        except Exception as err:
-            pool_id = self.pool_detail['id']
-            self.module.exit_json(
-                msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
-                                                                                                       self.ssid,
-                                                                                                       to_native(err)))
-
-        # TODO: check response
-        # TODO: support blocking wait?
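# Minimal sketch of the selection rule used by erase_all_available_secured_drives
# above (drive dicts abbreviated; real responses carry many more fields):
#
#     drives = [{"id": "d1", "available": True, "fdeEnabled": True},
#               {"id": "d2", "available": False, "fdeEnabled": True},
#               {"id": "d3", "available": True, "fdeEnabled": False}]
#     to_erase = [d["id"] for d in drives if d["available"] and d["fdeEnabled"]]
#     assert to_erase == ["d1"]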
- - def reduce_drives(self, drive_list): - if all(drive in drive_list for drive in self.sp_drives): - # all the drives passed in are present in the system - pass - else: - self.module.fail_json( - msg="One of the drives you wish to remove does not currently exist in the storage pool you specified") + def create_storage_pool(self): + """Create new storage pool.""" + url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid + request_body = dict(label=self.name, + candidate=self.get_candidate_drives()) + + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid + + request_body.update( + dict(backgroundOperationPriority="useDefault", + criticalReconstructPriority="useDefault", + degradedReconstructPriority="useDefault", + poolUtilizationCriticalThreshold=65535, + poolUtilizationWarningThreshold=0)) + + if self.reserve_drive_count: + request_body.update(dict(volumeCandidateData=dict( + diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count)))) try: - (rc, resp) = request( - self.api_url + "/storage-systems/%s/storage-pools/%s/reduction" % (self.ssid, - self.pool_detail['id']), - data=json.dumps(drive_list), headers=self.post_headers, method='POST', url_username=self.api_usr, - url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) - except Exception as err: - pool_id = self.pool_detail['id'] - self.module.exit_json( - msg="Failed to remove drives from storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % ( - pool_id, self.ssid, to_native(err))) + rc, resp = self.request(url, method="POST", data=request_body) + except Exception as error: + self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error[%s]." + % (self.ssid, to_native(error))) - def update_reserve_drive_count(self, qty): - data = dict(reservedDriveCount=qty) + # Update drive and storage pool information + self.pool_detail = self.storage_pool + + def delete_storage_pool(self): + """Delete storage pool.""" + storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]] try: - (rc, resp) = request( - self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']), - data=json.dumps(data), headers=self.post_headers, method='POST', url_username=self.api_usr, - url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) - except Exception as err: - pool_id = self.pool_detail['id'] - self.module.exit_json( - msg="Failed to update reserve drive count. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, - self.ssid, - to_native(err))) + delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else "" + rc, resp = self.request("storage-systems/%s/storage-pools/%s%s" + % (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." + % (self.pool_detail["id"], self.ssid, to_native(error))) + + if storage_pool_drives and self.erase_secured_drives: + try: + rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true" + % self.ssid, method="POST", data=dict(driveRef=storage_pool_drives)) + except Exception as error: + self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]." + " Error [%s]." 
% (self.ssid, to_native(error))) + + def secure_storage_pool(self, check_mode=False): + """Enable security on an existing storage pool""" + self.pool_detail = self.storage_pool + needs_secure_pool = False + + if not self.secure_pool and self.pool_detail["securityType"] == "enabled": + self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.") + if self.secure_pool and self.pool_detail["securityType"] != "enabled": + needs_secure_pool = True + + if needs_secure_pool and not check_mode: + try: + rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]), + data=dict(securePool=True), method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error" + " [%s]." % (self.pool_detail["id"], self.ssid, to_native(error))) + + self.pool_detail = self.storage_pool + return needs_secure_pool + + def migrate_raid_level(self, check_mode=False): + """Request storage pool raid level migration.""" + needs_migration = self.raid_level != self.pool_detail["raidLevel"] + if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool": + self.module.fail_json(msg="Raid level cannot be changed for disk pools") + + if needs_migration and not check_mode: + sp_raid_migrate_req = dict(raidLevel=self.raid_level) + + try: + rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration" + % (self.ssid, self.name), data=sp_raid_migrate_req, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]." + " Error[%s]." % (self.ssid, to_native(error))) + + self.pool_detail = self.storage_pool + return needs_migration + + def expand_storage_pool(self, check_mode=False): + """Add drives to existing storage pool. + + :return bool: whether drives were required to be added to satisfy the specified criteria.""" + expansion_candidate_list = self.get_expansion_candidate_drives() + changed_required = bool(expansion_candidate_list) + estimated_completion_time = 0.0 + + # build expandable groupings of traditional raid candidate + required_expansion_candidate_list = list() + while expansion_candidate_list: + subset = list() + while expansion_candidate_list and len(subset) < self.expandable_drive_count: + subset.extend(expansion_candidate_list.pop()["drives"]) + required_expansion_candidate_list.append(subset) + + if required_expansion_candidate_list and not check_mode: + url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid + + while required_expansion_candidate_list: + candidate_drives_list = required_expansion_candidate_list.pop() + request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"], + driveRef=candidate_drives_list) + try: + rc, resp = self.request(url, method="POST", data=request_body) + except Exception as error: + rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress" + % (self.ssid, self.pool_detail["id"]), ignore_errors=True) + if rc == 200 and actions_resp: + actions = [action["currentAction"] for action in actions_resp + if action["volumeRef"] in self.storage_pool_volumes] + self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions" + " in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error[%s]." 
+ % (", ".join(actions), self.pool_detail["id"], self.ssid, + to_native(error))) + + self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]." + " Error[%s]." % (self.pool_detail["id"], self.ssid, to_native(error))) + + # Wait for expansion completion unless it is the last request in the candidate list + if required_expansion_candidate_list: + for dummy in range(self.EXPANSION_TIMEOUT_SEC): + rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress" + % (self.ssid, self.pool_detail["id"]), ignore_errors=True) + if rc == 200: + for action in actions_resp: + if (action["volumeRef"] in self.storage_pool_volumes and + action["currentAction"] == "remappingDce"): + sleep(1) + estimated_completion_time = action["estimatedTimeToCompletion"] + break + else: + estimated_completion_time = 0.0 + break + + return changed_required, estimated_completion_time def apply(self): + """Apply requested state to storage array.""" changed = False - pool_exists = False - self.pool_detail = self.get_storage_pool(self.name) + if self.state == "present": + if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None: + self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be" + " specified.") + if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count): + self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.") + + self.pool_detail = self.storage_pool + self.module.log(pformat(self.pool_detail)) + if self.state == "present" and self.erase_secured_drives: + self.erase_all_available_secured_drives(check_mode=True) + + # Determine whether changes need to be applied to the storage array if self.pool_detail: - pool_exists = True - pool_id = self.pool_detail['id'] - if self.state == 'absent': - self.debug("CHANGED: storage pool exists, but requested state is 'absent'") + if self.state == "absent": changed = True - elif self.state == 'present': - # sanity checks first- we can't change these, so we'll bomb if they're specified - if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail['driveMediaType']: - self.module.fail_json( - msg="drive media type %s cannot be changed to %s" % (self.pool_detail['driveMediaType'], - self.criteria_drive_type)) - - # now the things we can change... - if self.needs_expansion: - self.debug("CHANGED: storage pool needs expansion") - changed = True - if self.needs_raid_level_migration: - self.debug( - "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'", - self.pool_detail['raidLevel'], self.raid_level) - changed = True + elif self.state == "present": - # if self.reserved_drive_count_differs: - # changed = True + if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives): + self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]." + % (self.ssid, self.pool_detail["id"])) - # TODO: validate other state details? (pool priority, alert threshold) + if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]: + self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type." + " Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"])) - # per FPoole and others, pool reduce operations will not be supported. 
Automatic "smart" reduction - # presents a difficult parameter issue, as the disk count can increase due to expansion, so we - # can't just use disk count > criteria_drive_count. + if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da != + self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]): + self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]." + " Pool [%s]." % (self.ssid, self.pool_detail["id"])) - else: # pool does not exist - if self.state == 'present': - self.debug("CHANGED: storage pool does not exist, but requested state is 'present'") - changed = True + # Evaluate current storage pool for required change. + needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True) + if needs_expansion: + changed = True + if self.migrate_raid_level(check_mode=True): + changed = True + if self.secure_storage_pool(check_mode=True): + changed = True + if self.set_reserve_drive_count(check_mode=True): + changed = True - # ensure we can get back a workable set of disks - # (doing this early so candidate selection runs under check mode) - self.disk_ids = self.get_candidate_disks() - else: - self.module.exit_json(msg="Storage pool [%s] did not exist." % (self.name)) + elif self.state == "present": + changed = True + # Apply changes to storage array + msg = "No changes were required for the storage pool [%s]." if changed and not self.module.check_mode: - # apply changes - if self.state == 'present': - if not pool_exists: + if self.state == "present": + if self.erase_secured_drives: + self.erase_all_available_secured_drives() + + if self.pool_detail: + change_list = list() + + # Expansion needs to occur before raid level migration to account for any sizing needs. + expanded, estimated_completion_time = self.expand_storage_pool() + if expanded: + change_list.append("expanded") + if self.migrate_raid_level(): + change_list.append("raid migration") + if self.secure_storage_pool(): + change_list.append("secured") + if self.set_reserve_drive_count(): + change_list.append("adjusted reserve drive count") + + if change_list: + msg = "Following changes have been applied to the storage pool [%s]: " + ", ".join(change_list) + + if expanded: + msg += "\nThe expansion operation will complete in an estimated %s minutes."\ + % estimated_completion_time + else: self.create_storage_pool() - else: # pool exists but differs, modify... - if self.needs_expansion: - self.expand_storage_pool() - - if self.remove_drives: - self.reduce_drives(self.remove_drives) - - if self.needs_raid_level_migration: - self.migrate_raid_level() - - # if self.reserved_drive_count_differs: - # self.update_reserve_drive_count(self.reserve_drive_count) - - if self.secure_pool: - secure_pool_data = dict(securePool=True) - try: - (retc, r) = request( - self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, - self.pool_detail[ - 'id']), - data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST', - url_username=self.api_usr, url_password=self.api_pwd, - validate_certs=self.validate_certs, timeout=120, ignore_errors=True) - except Exception as err: - self.module.exit_json( - msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % ( - pool_id, self.ssid, to_native(err))) - - if int(retc) == 422: - self.module.fail_json( - msg="Error in enabling secure pool. 
One of the drives in the specified storage pool is likely not security capable") - - elif self.state == 'absent': - # delete the storage pool - try: - remove_vol_opt = '' - if self.remove_volumes: - remove_vol_opt = '?delete-volumes=true' - (rc, resp) = request( - self.api_url + "/storage-systems/%s/storage-pools/%s%s" % (self.ssid, pool_id, - remove_vol_opt), - method='DELETE', - url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, - timeout=120) - except Exception as err: - self.module.exit_json( - msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, - self.ssid, - to_native(err))) - - self.module.exit_json(changed=changed, **self.pool_detail) + msg = "Storage pool [%s] was created." + + if self.secure_storage_pool(): + msg = "Storage pool [%s] was created and secured." + if self.set_reserve_drive_count(): + msg += " Adjusted reserve drive count." + + elif self.pool_detail: + self.delete_storage_pool() + msg = "Storage pool [%s] removed." + + self.pool_detail = self.storage_pool + self.module.log(pformat(self.pool_detail)) + self.module.log(msg % self.name) + self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail) def main(): - sp = NetAppESeriesStoragePool() - try: - sp.apply() - except Exception as e: - sp.debug("Exception in apply(): \n%s", format_exc()) - raise + storage_pool = NetAppESeriesStoragePool() + storage_pool.apply() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt index aa7d6fc569b..82ef715cbf2 100644 --- a/test/sanity/ignore.txt +++ b/test/sanity/ignore.txt @@ -6389,8 +6389,6 @@ lib/ansible/modules/storage/netapp/netapp_e_storage_system.py validate-modules:E lib/ansible/modules/storage/netapp/netapp_e_storage_system.py validate-modules:E324 lib/ansible/modules/storage/netapp/netapp_e_storage_system.py validate-modules:E337 lib/ansible/modules/storage/netapp/netapp_e_storage_system.py validate-modules:E338 -lib/ansible/modules/storage/netapp/netapp_e_storagepool.py validate-modules:E322 -lib/ansible/modules/storage/netapp/netapp_e_storagepool.py validate-modules:E326 lib/ansible/modules/storage/netapp/netapp_e_storagepool.py validate-modules:E337 lib/ansible/modules/storage/netapp/netapp_e_storagepool.py validate-modules:E338 lib/ansible/modules/storage/netapp/netapp_e_syslog.py validate-modules:E337 diff --git a/test/units/modules/storage/netapp/test_netapp_e_storagepool.py b/test/units/modules/storage/netapp/test_netapp_e_storagepool.py new file mode 100644 index 00000000000..1ce48bddb69 --- /dev/null +++ b/test/units/modules/storage/netapp/test_netapp_e_storagepool.py @@ -0,0 +1,724 @@ +# coding=utf-8 +# (c) 2018, NetApp Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible.modules.storage.netapp.netapp_e_storagepool import NetAppESeriesStoragePool + +try: + from unittest.mock import patch, PropertyMock +except ImportError: + from mock import patch, PropertyMock + + +class StoragePoolTest(ModuleTestCase): + REQUIRED_PARAMS = {"api_username": "username", + "api_password": "password", + "api_url": "http://localhost/devmgr/v2", + "ssid": "1", + "validate_certs": "no"} + + STORAGE_POOL_DATA = [{"raidLevel": "raidDiskPool", "volumeGroupRef": "04000000600A098000A4B28D000017805C7BD4D8", + "securityType": "capable", + "protectionInformationCapabilities": {"protectionInformationCapable": True, + "protectionType": "type2Protection"}, + "volumeGroupData": {"diskPoolData": {"reconstructionReservedDriveCount": 2}}, + "totalRaidedSpace": "2735894167552", "name": "pool", + "id": "04000000600A098000A4B28D000017805C7BD4D8", "driveMediaType": "hdd"}] + DRIVES_DATA = [{'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551ED1FF0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EB1930000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EAAE30000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551ECB1F0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': 
'010000005000C500551EB2930000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551ECB0B0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EC6C70000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551E9BA70000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551ED7CF0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551ECB0F0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551E72870000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': 
{'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551E9DBB0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EAC230000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EA0BB0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EAC4B0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551E7F2B0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EC9270000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 
'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EC97F0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551ECBFF0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551E9ED30000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EA4CF0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551EA29F0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551ECDFB0000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'hdd', 'id': '010000005000C500551E99230000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 
'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'ssd', 'id': '010000005000C500551E9ED31000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'ssd', 'id': '010000005000C500551EA4CF2000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'ssd', 'id': '010000005000C500551EA29F3000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'ssd', 'id': '010000005000C500551ECDFB4000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}, + {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000', + 'driveMediaType': 'ssd', 'id': '010000005000C500551E99235000000000000000', 'fdeCapable': True, + 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False, + 'phyDriveType': 'sata', 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False, + 'usableCapacity': '299463129088'}] + + RAID6_CANDIDATE_DRIVES = {"volumeCandidate": [ + {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "898389368832", "usableSize": "898388459520", + "driveCount": 5, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": { + "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000", + "010000005000C500551E9ED30000000000000000"]}, 
"candidateSelectionType": "count", + "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", "dssPreallocEnabled": False, + "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd", + "protectionInformationCapable": False, + "protectionInformationCapabilities": {"protectionInformationCapable": True, + "protectionType": "type2Protection"}, + "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None}, + "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"}, + {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "1197852491776", "usableSize": "1197851279360", + "driveCount": 6, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": { + "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000"]}, + "candidateSelectionType": "count", "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", + "dssPreallocEnabled": False, "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd", + "protectionInformationCapable": False, + "protectionInformationCapabilities": {"protectionInformationCapable": True, + "protectionType": "type2Protection"}, + "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None}, + "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"}, + {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "1497315614720", "usableSize": "1497314099200", + "driveCount": 7, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": { + "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000", + "010000005000C500551ED1FF0000000000000000"]}, "candidateSelectionType": "count", + "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", "dssPreallocEnabled": False, + "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd", + "protectionInformationCapable": False, + "protectionInformationCapabilities": {"protectionInformationCapable": True, + "protectionType": "type2Protection"}, + "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None}, + "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"}, + {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "1796778737664", "usableSize": "1796776919040", + "driveCount": 8, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": { + "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000", + "010000005000C500551ED1FF0000000000000000", "010000005000C500551EA29F0000000000000000"]}, + "candidateSelectionType": "count", "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", + "dssPreallocEnabled": False, "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd", + 
"protectionInformationCapable": False, + "protectionInformationCapabilities": {"protectionInformationCapable": True, + "protectionType": "type2Protection"}, + "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None}, + "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"}, + {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "2096241860608", "usableSize": "2096239738880", + "driveCount": 9, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": { + "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000", + "010000005000C500551ED1FF0000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551ECDFB0000000000000000"]}, "candidateSelectionType": "count", + "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", "dssPreallocEnabled": False, + "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd", + "protectionInformationCapable": False, + "protectionInformationCapabilities": {"protectionInformationCapable": True, + "protectionType": "type2Protection"}, + "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None}, + "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"}, + {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "2395704983552", "usableSize": "2395702558720", + "driveCount": 10, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": { + "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000", + "010000005000C500551ED1FF0000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551ECDFB0000000000000000", "010000005000C500551E99230000000000000000"]}, + "candidateSelectionType": "count", "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", + "dssPreallocEnabled": False, "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd", + "protectionInformationCapable": False, + "protectionInformationCapabilities": {"protectionInformationCapable": True, + "protectionType": "type2Protection"}, + "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None}, + "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"}], "returnCode": "ok"} + + EXPANSION_DDP_DRIVES_LIST = ["010000005000C500551ED1FF0000000000000000", "010000005000C500551E7F2B0000000000000000", + "010000005000C500551EC9270000000000000000", "010000005000C500551EC97F0000000000000000", + "010000005000C500551ECBFF0000000000000000", "010000005000C500551E9ED30000000000000000", + "010000005000C500551EA4CF0000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551ECDFB0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED31000000000000000", "010000005000C500551EA4CF2000000000000000", + "010000005000C500551EA29F3000000000000000", "010000005000C500551ECDFB4000000000000000", + "010000005000C500551E99235000000000000000"] + EXPANSION_DDP_DRIVE_DATA = {"returnCode": "ok", 
"candidates": [ + {"drives": ["010000005000C500551E7F2B0000000000000000"], "trayLossProtection": False, "wastedCapacity": "0", + "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "299463129088", + "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000"], + "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False, + "usableCapacity": "598926258176", "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED30000000000000000"], "trayLossProtection": False, "wastedCapacity": "0", + "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "898389387264", + "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000"], + "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False, + "usableCapacity": "1197852516352", "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551EA4CF0000000000000000"], "trayLossProtection": False, "wastedCapacity": "0", + "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "1497315645440", + "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000"], + "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False, + "usableCapacity": "1796778774528", "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000"], "trayLossProtection": False, "wastedCapacity": "0", + "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "2096241903616", + "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000"], + "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False, + "usableCapacity": "2395705032704", "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", 
"010000005000C500551ECBFF0000000000000000", + "010000005000C500551ECDFB0000000000000000"], "trayLossProtection": False, "wastedCapacity": "0", + "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "2695168161792", + "driveBlockFormat": "allNative"}, + {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000", + "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000", + "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000", + "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000", + "010000005000C500551ECDFB0000000000000000", "010000005000C500551ED1FF0000000000000000"], + "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False, + "usableCapacity": "2994631290880", "driveBlockFormat": "allNative"}]} + + REQUEST_FUNC = "ansible.modules.storage.netapp.netapp_e_storagepool.request" + NETAPP_REQUEST_FUNC = "ansible.module_utils.netapp.NetAppESeriesModule.request" + VALIDATE_FUNC = "ansible.modules.storage.netapp.netapp_e_storagepool.NetAppESeriesModule.validate_instance" + + DRIVES_PROPERTY = "ansible.modules.storage.netapp.netapp_e_storagepool.NetAppESeriesStoragePool.drives" + STORAGE_POOL_PROPERTY = "ansible.modules.storage.netapp.netapp_e_storagepool.NetAppESeriesStoragePool.storage_pool" + + def _set_args(self, args=None): + module_args = self.REQUIRED_PARAMS.copy() + if args is not None: + module_args.update(args) + set_module_args(module_args) + + def _initialize_dummy_instance(self, alt_args=None): + """Initialize a dummy instance of NetAppESeriesStoragePool for the purpose of testing individual methods.""" + args = {"state": "absent", "name": "storage_pool"} + if alt_args: + args.update(alt_args) + self._set_args(args) + return NetAppESeriesStoragePool() + + def test_drives_fail(self): + """Verify exception is thrown.""" + + with patch(self.NETAPP_REQUEST_FUNC) as netapp_request: + netapp_request.return_value = Exception() + storagepool = self._initialize_dummy_instance() + with self.assertRaisesRegexp(AnsibleFailJson, "Failed to fetch disk drives."): + drives = storagepool.drives + + def test_available_drives(self): + """Verify all drives returned are available""" + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + + storagepool = self._initialize_dummy_instance() + self.assertEqual(storagepool.available_drives, + ['010000005000C500551ED1FF0000000000000000', '010000005000C500551E7F2B0000000000000000', + '010000005000C500551EC9270000000000000000', '010000005000C500551EC97F0000000000000000', + '010000005000C500551ECBFF0000000000000000', '010000005000C500551E9ED30000000000000000', + '010000005000C500551EA4CF0000000000000000', '010000005000C500551EA29F0000000000000000', + '010000005000C500551ECDFB0000000000000000', '010000005000C500551E99230000000000000000', + '010000005000C500551E9ED31000000000000000', '010000005000C500551EA4CF2000000000000000', + '010000005000C500551EA29F3000000000000000', '010000005000C500551ECDFB4000000000000000', + '010000005000C500551E99235000000000000000']) + + def test_available_drive_types(self): + """Verify all drive types are returned in most common first order.""" + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + + storagepool = self._initialize_dummy_instance() + self.assertEqual(storagepool.available_drive_types[0], 
"hdd") + self.assertEqual(storagepool.available_drive_types[1], "ssd") + + def test_available_drive_interface_types(self): + """Verify all interface types are returned in most common first order.""" + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + + storagepool = self._initialize_dummy_instance() + self.assertEqual(storagepool.available_drive_interface_types[0], "sas") + self.assertEqual(storagepool.available_drive_interface_types[1], "sata") + + def test_storage_pool_drives(self): + """Verify storage pool drive collection.""" + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + + storagepool = self._initialize_dummy_instance( + {"state": "present", "name": "pool", "criteria_drive_count": "12", "raid_level": "raidDiskPool"}) + storagepool.pool_detail = self.STORAGE_POOL_DATA[0] + self.assertEqual(storagepool.storage_pool_drives, [ + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551EB1930000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551EAAE30000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551ECB1F0000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551EB2930000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 
'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551ECB0B0000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551EC6C70000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551E9BA70000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551ED7CF0000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551ECB0F0000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551E72870000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551E9DBB0000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 
'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551EAC230000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551EA0BB0000000000000000'}, + {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False, + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, 'fdeCapable': True, + 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False, + 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000', + 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False, + 'id': '010000005000C500551EAC4B0000000000000000'}]) + + def test_get_ddp_capacity(self): + """Evaluate returned capacity from get_ddp_capacity method.""" + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + + storagepool = self._initialize_dummy_instance( + {"state": "present", "name": "pool", "criteria_drive_count": "12", "raid_level": "raidDiskPool"}) + storagepool.pool_detail = self.STORAGE_POOL_DATA[0] + self.assertAlmostEqual(storagepool.get_ddp_capacity(self.EXPANSION_DDP_DRIVES_LIST), 6038680353645, + places=-2) # Allows for python version/architecture computational differences + + def test_get_candidate_drives(self): + """Verify correct candidate list is returned.""" + with patch(self.NETAPP_REQUEST_FUNC) as netapp_request: + netapp_request.return_value = (200, self.RAID6_CANDIDATE_DRIVES) + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + + storagepool = self._initialize_dummy_instance( + {"state": "present", "name": "raid6_vg", "criteria_drive_count": "6", "raid_level": "raid6"}) + self.assertEqual(storagepool.get_candidate_drives(), + {'candidateSelectionType': 'count', 'driveMediaType': 'hdd', + 'protectionInformationCapabilities': {'protectionInformationCapable': True, + 'protectionType': 'type2Protection'}, + 'dssPreallocEnabled': False, 'phyDriveType': 'sas', 'allocateReservedSpace': False, + 'trayLossProtection': False, 'raidLevel': 'raid6', 'spindleSpeed': 10000, + 'securityType': 'capable', 'securityLevel': 'fde', 'spindleSpeedMatch': True, + 'driveBlockFormat': 'allNative', 'protectionInformationCapable': False, + 'freeExtentRef': '0000000000000000000000000000000000000000', 'driveCount': 6, + 'driveRefList': {'driveRef': ['010000005000C500551E7F2B0000000000000000', + '010000005000C500551EC9270000000000000000', + '010000005000C500551EC97F0000000000000000', + '010000005000C500551ECBFF0000000000000000', + 
'010000005000C500551E9ED30000000000000000',
+ '010000005000C500551EA4CF0000000000000000']},
+ 'rawSize': '1197852491776', 'usableSize': '1197851279360',
+ 'drawerLossProtection': False,
+ 'volumeCandidateData': {'type': 'traditional', 'diskPoolVolumeCandidateData': None}})
+
+ def test_get_expansion_candidate_drives(self):
+ """Verify correct drive list is returned."""
+ with patch(self.NETAPP_REQUEST_FUNC) as netapp_request:
+ netapp_request.return_value = (200, self.EXPANSION_DDP_DRIVE_DATA)
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "criteria_drive_count": "20", "raid_level": "raidDiskPool"})
+ storagepool.pool_detail = self.STORAGE_POOL_DATA[0]
+ self.assertEqual(storagepool.get_expansion_candidate_drives(), [
+ {'drawerLossProtection': False, 'trayLossProtection': False,
+ 'drives': ['010000005000C500551E7F2B0000000000000000', '010000005000C500551E99230000000000000000',
+ '010000005000C500551E9ED30000000000000000', '010000005000C500551EA29F0000000000000000',
+ '010000005000C500551EA4CF0000000000000000', '010000005000C500551EC9270000000000000000'],
+ 'spindleSpeedMatch': True, 'driveBlockFormat': 'allNative', 'usableCapacity': '1796778774528',
+ 'wastedCapacity': '0'}])
+
+ def test_get_maximum_reserve_drive_count(self):
+ """Ensure maximum reserve drive count is accurately calculated."""
+ with patch(self.NETAPP_REQUEST_FUNC) as netapp_request:
+ netapp_request.return_value = (200, self.EXPANSION_DDP_DRIVE_DATA)
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "criteria_drive_count": "20", "raid_level": "raidDiskPool"})
+ storagepool.pool_detail = self.STORAGE_POOL_DATA[0]
+ self.assertEqual(storagepool.get_maximum_reserve_drive_count(), 5)
+
+ def test_apply_check_mode_unchange(self):
+ """Verify that the changes are appropriately determined."""
+ # Storage pool that is already absent is requested to be absent
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': False"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = {}
+ storagepool = self._initialize_dummy_instance(
+ {"state": "absent", "name": "not-a-pool", "erase_secured_drives": False,
+ "criteria_drive_count": "14", "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.apply()
+
+ # Present storage pool with no changes
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': False"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = self.STORAGE_POOL_DATA[0]
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "erase_secured_drives": False,
+ "criteria_drive_count": "14", "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.apply()
+
+ def test_apply_check_mode_change(self):
+ """Verify that the changes are appropriately determined."""
+ # Remove an existing storage pool
+ with 
self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"): + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool: + storage_pool.return_value = self.STORAGE_POOL_DATA[0] + storagepool = self._initialize_dummy_instance( + {"state": "absent", "name": "pool", "erase_secured_drives": False, "criteria_drive_count": "14", + "raid_level": "raidDiskPool"}) + storagepool.module.check_mode = True + storagepool.is_drive_count_valid = lambda x: True + storagepool.apply() + + # Expand present storage pool + with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"): + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool: + storage_pool.return_value = self.STORAGE_POOL_DATA[0] + storagepool = self._initialize_dummy_instance( + {"state": "present", "name": "pool", "erase_secured_drives": False, + "criteria_drive_count": "15", "raid_level": "raidDiskPool"}) + storagepool.module.check_mode = True + storagepool.is_drive_count_valid = lambda x: True + storagepool.expand_storage_pool = lambda check_mode: (True, 100) + storagepool.migrate_raid_level = lambda check_mode: False + storagepool.secure_storage_pool = lambda check_mode: False + storagepool.set_reserve_drive_count = lambda check_mode: False + storagepool.apply() + + # Migrate present storage pool raid level + with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"): + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool: + storage_pool.return_value = self.STORAGE_POOL_DATA[0] + storagepool = self._initialize_dummy_instance( + {"state": "present", "name": "pool", "erase_secured_drives": False, + "criteria_drive_count": "15", "raid_level": "raidDiskPool"}) + storagepool.module.check_mode = True + storagepool.is_drive_count_valid = lambda x: True + storagepool.expand_storage_pool = lambda check_mode: (False, 0) + storagepool.migrate_raid_level = lambda check_mode: True + storagepool.secure_storage_pool = lambda check_mode: False + storagepool.set_reserve_drive_count = lambda check_mode: False + storagepool.apply() + + # Secure present storage pool + with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"): + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = self.DRIVES_DATA + with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool: + storage_pool.return_value = self.STORAGE_POOL_DATA[0] + storagepool = self._initialize_dummy_instance( + {"state": "present", "name": "pool", "erase_secured_drives": False, + "criteria_drive_count": "15", "raid_level": "raidDiskPool"}) + storagepool.module.check_mode = True + storagepool.is_drive_count_valid = lambda x: True + storagepool.expand_storage_pool = lambda check_mode: (False, 0) + storagepool.migrate_raid_level = lambda check_mode: False + storagepool.secure_storage_pool = lambda check_mode: True + storagepool.set_reserve_drive_count = lambda check_mode: False + storagepool.apply() + + # Change present storage pool reserve drive count + with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"): + with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives: + drives.return_value = 
self.DRIVES_DATA + with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool: + storage_pool.return_value = self.STORAGE_POOL_DATA[0] + storagepool = self._initialize_dummy_instance( + {"state": "present", "name": "pool", "erase_secured_drives": False, + "criteria_drive_count": "15", "raid_level": "raidDiskPool"}) + storagepool.module.check_mode = True + storagepool.is_drive_count_valid = lambda x: True + storagepool.expand_storage_pool = lambda check_mode: (False, 0) + storagepool.migrate_raid_level = lambda check_mode: False + storagepool.secure_storage_pool = lambda check_mode: False + storagepool.set_reserve_drive_count = lambda check_mode: True + storagepool.apply()
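
A note on the drive-grouping step in expand_storage_pool above: expansion candidates are
drained into request-sized batches before any expansion request is posted. The following
minimal, self-contained sketch reproduces that loop; batch_expansion_candidates, candidates
and max_per_request are illustrative stand-ins for the module's expansion_candidate_list
and expandable_drive_count, not names from the module itself.

    def batch_expansion_candidates(candidates, max_per_request):
        """Drain candidate dicts into per-request batches of drive references."""
        batches = []
        while candidates:
            subset = []
            # Keep folding whole candidates into the batch until the limit is reached.
            while candidates and len(subset) < max_per_request:
                subset.extend(candidates.pop()["drives"])
            batches.append(subset)
        return batches

    # Two candidates of two drives each, with at most two drives per request,
    # yield two expansion requests (candidates are consumed from the end).
    print(batch_expansion_candidates(
        [{"drives": ["drive-1", "drive-2"]}, {"drives": ["drive-3", "drive-4"]}], 2))
    # [['drive-3', 'drive-4'], ['drive-1', 'drive-2']]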
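
Between expansion requests, expand_storage_pool polls the pool's action-progress endpoint
until no pool volume still reports a "remappingDce" action. A hedged sketch of that polling
pattern follows, assuming a request(path, **kwargs) helper with the same (return code, body)
contract as the module's REST helper; wait_for_expansion is a hypothetical name used only
for illustration.

    from time import sleep

    def wait_for_expansion(request, ssid, pool_id, pool_volume_refs, timeout_sec):
        """Poll until no volume in the pool reports an in-progress remappingDce action."""
        for _ in range(timeout_sec):
            rc, actions = request("storage-systems/%s/storage-pools/%s/action-progress"
                                  % (ssid, pool_id), ignore_errors=True)
            if rc == 200 and not any(action["volumeRef"] in pool_volume_refs and
                                     action["currentAction"] == "remappingDce"
                                     for action in actions):
                return True
            sleep(1)  # Poll once per second, mirroring the module's loop.
        return False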
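
The unit tests above lean on one pattern throughout: patching the drives and storage_pool
class properties with PropertyMock so that no REST request is ever issued. A minimal
self-contained sketch of the same pattern; the Array class here is a hypothetical stand-in
for NetAppESeriesStoragePool, not part of the module.

    try:
        from unittest.mock import patch, PropertyMock
    except ImportError:
        from mock import patch, PropertyMock


    class Array(object):
        @property
        def drives(self):
            # Stand-in for a property that would otherwise issue a REST request.
            raise RuntimeError("would issue a REST request")


    # Patch the property on the class so every instance returns canned data.
    with patch(__name__ + ".Array.drives", new_callable=PropertyMock) as drives:
        drives.return_value = [{"id": "drive-1", "available": True}]
        assert Array().drives == [{"id": "drive-1", "available": True}]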