|
|
|
|
@ -31,13 +31,28 @@ options:
|
|
|
|
|
required: true
|
|
|
|
|
description:
|
|
|
|
|
- Specifies the node to which all visible unowned disks will be assigned.
|
|
|
|
|
|
|
|
|
|
disk_count:
|
|
|
|
|
required: false
|
|
|
|
|
type: int
|
|
|
|
|
description:
|
|
|
|
|
- Total number of disks a node should own
|
|
|
|
|
version_added: '2.9'
|
|
|
|
|
'''
|
|
|
|
|
|
|
|
|
|
EXAMPLES = """
|
|
|
|
|
- name: Assign unowned disks
|
|
|
|
|
na_ontap_disks:
|
|
|
|
|
node: cluster-01
|
|
|
|
|
hostname: "{{ hostname }} "
|
|
|
|
|
hostname: "{{ hostname }}"
|
|
|
|
|
username: "{{ admin_username }}"
|
|
|
|
|
password: "{{ admin_password }}"
|
|
|
|
|
|
|
|
|
|
- name: Assign specified total disks
|
|
|
|
|
na_ontap_disks:
|
|
|
|
|
node: cluster-01
|
|
|
|
|
disk_count: 56
|
|
|
|
|
hostname: "{{ hostname }}"
|
|
|
|
|
username: "{{ admin_username }}"
|
|
|
|
|
password: "{{ admin_password }}"
|
|
|
|
|
"""
|
|
|
|
|
@ -63,6 +78,7 @@ class NetAppOntapDisks(object):
|
|
|
|
|
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
|
|
|
|
|
self.argument_spec.update(dict(
|
|
|
|
|
node=dict(required=True, type='str'),
|
|
|
|
|
disk_count=dict(required=False, type='int')
|
|
|
|
|
))
|
|
|
|
|
|
|
|
|
|
self.module = AnsibleModule(
|
|
|
|
|
@ -78,9 +94,9 @@ class NetAppOntapDisks(object):
|
|
|
|
|
else:
|
|
|
|
|
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
|
|
|
|
|
|
|
|
|
|
def disk_check(self):
|
|
|
|
|
def get_unassigned_disk_count(self):
|
|
|
|
|
"""
|
|
|
|
|
Check for disks
|
|
|
|
|
Check for free disks
|
|
|
|
|
"""
|
|
|
|
|
disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
|
|
|
|
|
disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
|
|
|
|
|
@ -92,20 +108,50 @@ class NetAppOntapDisks(object):
|
|
|
|
|
disk_query.add_child_elem(disk_storage_info)
|
|
|
|
|
|
|
|
|
|
disk_iter.add_child_elem(disk_query)
|
|
|
|
|
result = self.server.invoke_successfully(disk_iter, True)
|
|
|
|
|
|
|
|
|
|
if result.get_child_by_name('num-records') and \
|
|
|
|
|
int(result.get_child_content('num-records')) >= 1:
|
|
|
|
|
has_disks = "true"
|
|
|
|
|
return has_disks
|
|
|
|
|
try:
|
|
|
|
|
result = self.server.invoke_successfully(disk_iter, True)
|
|
|
|
|
except netapp_utils.zapi.NaApiError as error:
|
|
|
|
|
self.module.fail_json(msg='Error getting disk information: %s'
|
|
|
|
|
% (to_native(error)),
|
|
|
|
|
exception=traceback.format_exc())
|
|
|
|
|
return int(result.get_child_content('num-records'))
|
|
|
|
|
|
|
|
|
|
def get_owned_disk_count(self):
|
|
|
|
|
"""
|
|
|
|
|
Check for owned disks
|
|
|
|
|
"""
|
|
|
|
|
disk_iter = netapp_utils.zapi.NaElement('storage-disk-get-iter')
|
|
|
|
|
disk_storage_info = netapp_utils.zapi.NaElement('storage-disk-info')
|
|
|
|
|
disk_ownership_info = netapp_utils.zapi.NaElement('disk-ownership-info')
|
|
|
|
|
disk_ownership_info.add_new_child('home-node-name', self.parameters['node'])
|
|
|
|
|
disk_storage_info.add_child_elem(disk_ownership_info)
|
|
|
|
|
|
|
|
|
|
disk_query = netapp_utils.zapi.NaElement('query')
|
|
|
|
|
disk_query.add_child_elem(disk_storage_info)
|
|
|
|
|
|
|
|
|
|
disk_iter.add_child_elem(disk_query)
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
result = self.server.invoke_successfully(disk_iter, True)
|
|
|
|
|
except netapp_utils.zapi.NaApiError as error:
|
|
|
|
|
self.module.fail_json(msg='Error getting disk information: %s'
|
|
|
|
|
% (to_native(error)),
|
|
|
|
|
exception=traceback.format_exc())
|
|
|
|
|
return int(result.get_child_content('num-records'))
|
|
|
|
|
|
|
|
|
|
def disk_assign(self):
|
|
|
|
|
def disk_assign(self, needed_disks):
|
|
|
|
|
"""
|
|
|
|
|
enable aggregate (online).
|
|
|
|
|
Set node as disk owner.
|
|
|
|
|
"""
|
|
|
|
|
assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
|
|
|
|
|
'disk-sanown-assign', **{'node-name': self.parameters['node'],
|
|
|
|
|
'all': 'true'})
|
|
|
|
|
if needed_disks > 0:
|
|
|
|
|
assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
|
|
|
|
|
'disk-sanown-assign', **{'owner': self.parameters['node'],
|
|
|
|
|
'disk-count': str(needed_disks)})
|
|
|
|
|
else:
|
|
|
|
|
assign_disk = netapp_utils.zapi.NaElement.create_node_with_children(
|
|
|
|
|
'disk-sanown-assign', **{'node-name': self.parameters['node'],
|
|
|
|
|
'all': 'true'})
|
|
|
|
|
try:
|
|
|
|
|
self.server.invoke_successfully(assign_disk,
|
|
|
|
|
enable_tunneling=True)
|
|
|
|
|
@ -128,10 +174,24 @@ class NetAppOntapDisks(object):
|
|
|
|
|
netapp_utils.ems_log_event("na_ontap_disks", cserver)
|
|
|
|
|
|
|
|
|
|
# check if anything needs to be changed (add/delete/update)
|
|
|
|
|
unowned_disks = self.disk_check()
|
|
|
|
|
if unowned_disks == 'true':
|
|
|
|
|
self.disk_assign()
|
|
|
|
|
changed = True
|
|
|
|
|
unowned_disks = self.get_unassigned_disk_count()
|
|
|
|
|
owned_disks = self.get_owned_disk_count()
|
|
|
|
|
if 'disk_count' in self.parameters:
|
|
|
|
|
if self.parameters['disk_count'] < owned_disks:
|
|
|
|
|
self.module.fail_json(msg="Fewer disks than are currently owned was requested. "
|
|
|
|
|
"This module does not do any disk removing. "
|
|
|
|
|
"All disk removing will need to be done manually.")
|
|
|
|
|
if self.parameters['disk_count'] > owned_disks + unowned_disks:
|
|
|
|
|
self.module.fail_json(msg="Not enough unowned disks remain to fulfill request")
|
|
|
|
|
if unowned_disks >= 1:
|
|
|
|
|
if 'disk_count' in self.parameters:
|
|
|
|
|
if self.parameters['disk_count'] > owned_disks:
|
|
|
|
|
needed_disks = self.parameters['disk_count'] - owned_disks
|
|
|
|
|
self.disk_assign(needed_disks)
|
|
|
|
|
changed = True
|
|
|
|
|
else:
|
|
|
|
|
self.disk_assign(0)
|
|
|
|
|
changed = True
|
|
|
|
|
self.module.exit_json(changed=changed)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|