pull/60257/head
Chris Archibald 6 years ago committed by ansibot
parent 2c90ddd44d
commit 2cae0739e9

@ -31,6 +31,13 @@ options:
    required: true
    description:
    - It specifies the node to assign all visible unowned disks.
disk_count:
required: false
type: int
description:
- Total number of disks a node should own
version_added: '2.9'
'''
EXAMPLES = """
@ -40,6 +47,14 @@ EXAMPLES = """
    hostname: "{{ hostname }}"
    username: "{{ admin username }}"
    password: "{{ admin password }}"
- name: Assign specified total disks
na_ontap_disks:
node: cluster-01
disk_count: 56
hostname: "{{ hostname }}"
username: "{{ admin username }}"
password: "{{ admin password }}"
""" """
RETURN = """ RETURN = """
@ -63,6 +78,7 @@ class NetAppOntapDisks(object):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec() self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict( self.argument_spec.update(dict(
node=dict(required=True, type='str'), node=dict(required=True, type='str'),
disk_count=dict(required=False, type='int')
)) ))
self.module = AnsibleModule( self.module = AnsibleModule(
@ -78,9 +94,9 @@ class NetAppOntapDisks(object):
else: else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def disk_check(self): def get_unassigned_disk_count(self):
""" """
Check for disks Check for free disks
""" """
disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter') disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info') disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
@ -92,17 +108,47 @@ class NetAppOntapDisks(object):
disk_query.add_child_elem(disk_storage_info) disk_query.add_child_elem(disk_storage_info)
disk_iter.add_child_elem(disk_query) disk_iter.add_child_elem(disk_query)
try:
result = self.server.invoke_successfully(disk_iter, True) result = self.server.invoke_successfully(disk_iter, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error getting disk information: %s'
% (to_native(error)),
exception=traceback.format_exc())
return int(result.get_child_content('num-records'))
def get_owned_disk_count(self):
    """
    Count the disks currently homed to the target node.

    Issues a 'storage-disk-get-iter' ZAPI call filtered on
    disk-ownership-info/home-node-name == self.parameters['node'] and
    returns the reported 'num-records' value as an int. Fails the module
    (via fail_json) if the ZAPI call raises an error.
    """
    # Build the query filter from the inside out: ownership criterion,
    # wrapped in storage-disk-info, wrapped in the <query> element.
    ownership = netapp_utils.zapi.NaElement('disk-ownership-info')
    ownership.add_new_child('home-node-name', self.parameters['node'])
    storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
    storage_info.add_child_elem(ownership)
    query = netapp_utils.zapi.NaElement('query')
    query.add_child_elem(storage_info)
    request = netapp_utils.zapi.NaElement('storage-disk-get-iter')
    request.add_child_elem(query)
    try:
        reply = self.server.invoke_successfully(request, True)
    except netapp_utils.zapi.NaApiError as error:
        self.module.fail_json(msg='Error getting disk information: %s'
                              % (to_native(error)),
                              exception=traceback.format_exc())
    # num-records is the count of disks matching the ownership filter.
    return int(reply.get_child_content('num-records'))
def disk_assign(self, needed_disks):
""" """
enable aggregate (online). Set node as disk owner.
""" """
if needed_disks > 0:
assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
'disk-sanown-assign', **{'owner': self.parameters['node'],
'disk-count': str(needed_disks)})
else:
assign_disk = netapp_utils.zapi.NaElement.create_node_with_children( assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
'disk-sanown-assign', **{'node-name': self.parameters['node'], 'disk-sanown-assign', **{'node-name': self.parameters['node'],
'all': 'true'}) 'all': 'true'})
@ -128,9 +174,23 @@ class NetAppOntapDisks(object):
netapp_utils.ems_log_event("na_ontap_disks", cserver) netapp_utils.ems_log_event("na_ontap_disks", cserver)
# check if anything needs to be changed (add/delete/update) # check if anything needs to be changed (add/delete/update)
unowned_disks = self.disk_check() unowned_disks = self.get_unassigned_disk_count()
if unowned_disks == 'true': owned_disks = self.get_owned_disk_count()
self.disk_assign() if 'disk_count' in self.parameters:
if self.parameters['disk_count'] < owned_disks:
self.module.fail_json(msg="Fewer disks than are currently owned was requested. "
"This module does not do any disk removing. "
"All disk removing will need to be done manually.")
if self.parameters['disk_count'] > owned_disks + unowned_disks:
self.module.fail_json(msg="Not enough unowned disks remain to fulfill request")
if unowned_disks >= 1:
if 'disk_count' in self.parameters:
if self.parameters['disk_count'] > owned_disks:
needed_disks = self.parameters['disk_count'] - owned_disks
self.disk_assign(needed_disks)
changed = True
else:
self.disk_assign(0)
changed = True changed = True
self.module.exit_json(changed=changed) self.module.exit_json(changed=changed)

Loading…
Cancel
Save