diff --git a/playbooks/modules/storagepool.yml b/playbooks/modules/storagepool.yml index 7e7860a..7d407b6 100644 --- a/playbooks/modules/storagepool.yml +++ b/playbooks/modules/storagepool.yml @@ -20,7 +20,29 @@ validate_certs: "{{ validate_certs }}" storage_pool_name: "{{ pool_name }}" protection_domain_name: "{{ protection_domain_name }}" - media_type: "HDD" + cap_alert_thresholds: + high_threshold: 30 + critical_threshold: 50 + media_type: "TRANSITIONAL" + enable_zero_padding: true + rep_cap_max_ratio: 40 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 80 + enable_rebalance: false + enable_fragmentation: false + enable_rebuild: false + use_rmcache: true + use_rfcache: true + parallel_rebuild_rebalance_limit: 3 + protected_maintenance_mode_io_priority_policy: + policy: "unlimited" + rebalance_io_priority_policy: + policy: "unlimited" + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 10 + persistent_checksum: + enable: false state: "present" - name: Set pool id @@ -47,15 +69,40 @@ storage_pool_new_name: "{{ pool_name }}" state: "present" - - name: Modify a Storage pool by name dellemc.powerflex.storagepool: hostname: "{{ hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: "{{ validate_certs }}" - storage_pool_name: "ansible_test_pool" + storage_pool_name: "{{ pool_name }}" protection_domain_name: "{{ protection_domain_name }}" + storage_pool_new_name: "pool_name_new" + cap_alert_thresholds: + high_threshold: 50 + critical_threshold: 70 + enable_zero_padding: false + rep_cap_max_ratio: 60 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 90 + enable_rebalance: true + enable_fragmentation: true + enable_rebuild: true use_rmcache: true use_rfcache: true + parallel_rebuild_rebalance_limit: 6 + protected_maintenance_mode_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 4 + rebalance_io_priority_policy: + policy: 
"favorAppIos" + concurrent_ios_per_device: 10 + bw_limit_per_device: 4096 + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 10 + persistent_checksum: + enable: true + validate_on_read: true + builder_limit: 1024 state: "present" diff --git a/plugins/modules/storagepool.py b/plugins/modules/storagepool.py index 9c8bb1d..5aca87b 100644 --- a/plugins/modules/storagepool.py +++ b/plugins/modules/storagepool.py @@ -1,6 +1,6 @@ #!/usr/bin/python -# Copyright: (c) 2021, Dell Technologies +# Copyright: (c) 2021-24, Dell Technologies # Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) """Ansible module for managing Dell Technologies (Dell) PowerFlex storage pool""" @@ -28,6 +28,7 @@ author: - Arindam Datta (@dattaarindam) - P Srinivas Rao (@srinivas-rao5) +- Trisha Datta (@trisha-dell) options: storage_pool_name: @@ -76,6 +77,160 @@ description: - Enable/Disable RMcache on a specific storage pool. type: bool + enable_zero_padding: + description: + - Enable/Disable zero padding on a specific storage pool. + type: bool + rep_cap_max_ratio: + description: + - Set replication journal capacity of a storage pool. + type: int + enable_rebalance: + description: + - Enable/Disable rebalance on a specific storage pool. + type: bool + spare_percentage: + description: + - Set the spare percentage of a specific storage pool. + type: int + rmcache_write_handling_mode : + description: + - Set RM cache write handling mode of a storage pool. + - I(Passthrough) Writes skip the cache and are stored in storage only. + - I(Cached) Writes are stored in both cache and storage (the default). + - Caching is only performed for IOs whose size is a multiple of 4k bytes. + type: str + choices: ['Cached', 'Passthrough'] + default: 'Cached' + enable_rebuild: + description: + - Enable/Disable rebuild of a specific storage pool. 
+ type: bool + enable_fragmentation: + description: + - Enable/Disable fragmentation of a specific storage pool. + type: bool + parallel_rebuild_rebalance_limit: + description: + - Set rebuild/rebalance parallelism limit of a storage pool. + type: int + persistent_checksum: + description: + - Enable/Disable persistent checksum of a specific storage pool. + type: dict + suboptions: + enable: + description: + - Enable / disable persistent checksum. + type: bool + validate_on_read: + description: + - Validate checksum upon reading data. + type: bool + builder_limit: + description: + - Bandwidth limit in KB/s for the checksum building process. + - Valid range is 1024 to 10240. + default: 3072 + type: int + protected_maintenance_mode_io_priority_policy: + description: + - Set protected maintenance mode I/O priority policy of a storage pool. + type: dict + suboptions: + policy: + description: + - The I/O priority policy for protected maintenance mode. + - C(unlimited) Protected maintenance mode IOPS are not limited + - C(limitNumOfConcurrentIos)Limit the number of allowed concurrent protected maintenance mode + migration I/Os to the value defined for I(concurrent_ios_per_device). + - C(favorAppIos) Always limit the number of allowed concurrent protected maintenance mode + migration I/Os to value defined for I(concurrent_ios_per_device). + - If application I/Os are in progress, should also limit the bandwidth of + protected maintenance mode migration I/Os to the limit defined for the I(bw_limit_per_device). + type: str + choices: ['unlimited', 'limitNumOfConcurrentIos', 'favorAppIos'] + default: 'limitNumOfConcurrentIos' + concurrent_ios_per_device: + description: + - The maximum number of concurrent protected maintenance mode migration I/Os per device. + - Valid range is 1 to 20. + type: int + bw_limit_per_device: + description: + - The maximum bandwidth of protected maintenance mode migration I/Os, + in KB per second, per device. + - Valid range is 1024 to 1048576. 
+ type: int + vtree_migration_io_priority_policy: + description: + - Set the I/O priority policy for V-Tree migration for a specific Storage Pool. + type: dict + suboptions: + policy: + description: + - The I/O priority policy for protected maintenance mode. + - C(limitNumOfConcurrentIos) Limit the number of allowed concurrent V-Tree + migration I/Os (default) to the I(concurrent_ios_per_device). + - C(favorAppIos) Always limit the number of allowed concurrent + V-Tree migration I/Os to defined for I(concurrent_ios_per_device). + - If application I/Os are in progress, should also limit the bandwidth of + V-Tree migration I/Os to the limit defined for the I(bw_limit_per_device). + type: str + choices: ['limitNumOfConcurrentIos', 'favorAppIos'] + concurrent_ios_per_device: + description: + - The maximum number of concurrent V-Tree migration I/Os per device. + - Valid range is 1 to 20 + type: int + bw_limit_per_device: + description: + - The maximum bandwidth of V-Tree migration I/Os, + in KB per second, per device. + - Valid range is 1024 to 25600. + type: int + rebalance_io_priority_policy: + description: + - Set the rebalance I/O priority policy for a Storage Pool. + type: dict + suboptions: + policy: + description: + - Policy to use for rebalance I/O priority. + - C(unlimited) Rebalance I/Os are not limited. + - C(limitNumOfConcurrentIos) Limit the number of allowed concurrent rebalance I/Os. + - C(favorAppIos) Limit the number and bandwidth of rebalance I/Os when application I/Os are in progress. + type: str + choices: ['unlimited', 'limitNumOfConcurrentIos', 'favorAppIos'] + default: 'favorAppIos' + concurrent_ios_per_device: + description: + - The maximum number of concurrent rebalance I/Os per device. + - Valid range is 1 to 20. + type: int + bw_limit_per_device: + description: + - The maximum bandwidth of rebalance I/Os, in KB/s, per device. + - Valid range is 1024 to 1048576. 
+ type: int + cap_alert_thresholds: + description: + - Set the threshold for triggering capacity usage alerts. + - Alerts thresholds are calculated from each Storage Pool + capacity after deducting the defined amount of spare capacity. + type: dict + suboptions: + high_threshold: + description: + - Threshold of the non-spare capacity of the Storage Pool that will trigger a + high-priority alert, expressed as a percentage. + - This value must be lower than the I(critical_threshold). + type: int + critical_threshold: + description: + - Threshold of the non-spare capacity of the Storage Pool that will trigger a + critical-priority alert, expressed as a percentage. + type: int state: description: - State of the storage pool. @@ -84,7 +239,7 @@ required: true notes: - TRANSITIONAL media type is supported only during modification. - - The I(check_mode) is not supported. + - The I(check_mode) is supported. ''' EXAMPLES = r''' @@ -107,37 +262,75 @@ storage_pool_id: "abcd1234ab12r" state: "present" -- name: Create a new storage pool by name - dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_name: "ansible_test_pool" - protection_domain_id: "1c957da800000000" - media_type: "HDD" - state: "present" - -- name: Modify a storage pool by name +- name: Create a new Storage pool dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_name: "ansible_test_pool" - protection_domain_id: "1c957da800000000" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + storage_pool_name: "{{ pool_name }}" + protection_domain_name: "{{ protection_domain_name }}" + cap_alert_thresholds: + high_threshold: 30 + critical_threshold: 50 + media_type: "TRANSITIONAL" + enable_zero_padding: true + 
rep_cap_max_ratio: 40 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 80 + enable_rebalance: false + enable_fragmentation: false + enable_rebuild: false use_rmcache: true use_rfcache: true + parallel_rebuild_rebalance_limit: 3 + protected_maintenance_mode_io_priority_policy: + policy: "unlimited" + rebalance_io_priority_policy: + policy: "unlimited" + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 10 + persistent_checksum: + enable: false state: "present" -- name: Rename storage pool by id +- name: Modify a Storage pool by name dellemc.powerflex.storagepool: - hostname: "{{hostname}}" - username: "{{username}}" - password: "{{password}}" - validate_certs: "{{validate_certs}}" - storage_pool_id: "abcd1234ab12r" - storage_pool_new_name: "new_ansible_pool" + hostname: "{{ hostname }}" + username: "{{ username }}" + password: "{{ password }}" + validate_certs: "{{ validate_certs }}" + storage_pool_name: "{{ pool_name }}" + protection_domain_name: "{{ protection_domain_name }}" + storage_pool_new_name: "pool_name_new" + cap_alert_thresholds: + high_threshold: 50 + critical_threshold: 70 + enable_zero_padding: false + rep_cap_max_ratio: 60 + rmcache_write_handling_mode: "Passthrough" + spare_percentage: 90 + enable_rebalance: true + enable_fragmentation: true + enable_rebuild: true + use_rmcache: true + use_rfcache: true + parallel_rebuild_rebalance_limit: 6 + protected_maintenance_mode_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 4 + rebalance_io_priority_policy: + policy: "favorAppIos" + concurrent_ios_per_device: 10 + bw_limit_per_device: 4096 + vtree_migration_io_priority_policy: + policy: "limitNumOfConcurrentIos" + concurrent_ios_per_device: 10 + persistent_checksum: + enable: true + validate_on_read: true + builder_limit: 1024 state: "present" ''' @@ -558,75 +751,50 @@ ''' from ansible.module_utils.basic import AnsibleModule -from 
ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\ +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.powerflex_base \ + import PowerFlexBase +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \ + import Configuration +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ import utils LOG = utils.get_logger('storagepool') -class PowerFlexStoragePool(object): +class PowerFlexStoragePool(PowerFlexBase): """Class with StoragePool operations""" def __init__(self): """ Define all parameters required by this module""" - - self.module_params = utils.get_powerflex_gateway_host_parameters() - self.module_params.update(get_powerflex_storagepool_parameters()) - """ initialize the ansible module """ - mut_ex_args = [['storage_pool_name', 'storage_pool_id'], - ['protection_domain_name', 'protection_domain_id'], - ['storage_pool_id', 'protection_domain_name'], - ['storage_pool_id', 'protection_domain_id']] - - required_one_of_args = [['storage_pool_name', 'storage_pool_id']] - self.module = AnsibleModule(argument_spec=self.module_params, - supports_check_mode=False, - mutually_exclusive=mut_ex_args, - required_one_of=required_one_of_args) + mutually_exclusive = [['storage_pool_name', 'storage_pool_id'], + ['protection_domain_name', 'protection_domain_id'], + ['storage_pool_id', 'protection_domain_name'], + ['storage_pool_id', 'protection_domain_id']] - utils.ensure_required_libs(self.module) - - try: - self.powerflex_conn = utils.get_powerflex_gateway_host_connection( - self.module.params) - LOG.info('Got the PowerFlex system connection object instance') - except Exception as e: - LOG.error(str(e)) - self.module.fail_json(msg=str(e)) - - def get_protection_domain(self, protection_domain_name=None, - protection_domain_id=None): - """Get protection domain details - :param protection_domain_name: Name of the protection domain - :param 
protection_domain_id: ID of the protection domain - :return: Protection domain details - """ - name_or_id = protection_domain_id if protection_domain_id \ - else protection_domain_name - try: - filter_fields = {} - if protection_domain_id: - filter_fields = {'id': protection_domain_id} - if protection_domain_name: - filter_fields = {'name': protection_domain_name} + required_one_of = [['storage_pool_name', 'storage_pool_id']] - pd_details = self.powerflex_conn.protection_domain.get( - filter_fields=filter_fields) - if pd_details: - return pd_details[0] + ansible_module_params = { + 'argument_spec': get_powerflex_storagepool_parameters(), + 'supports_check_mode': True, + 'mutually_exclusive': mutually_exclusive, + 'required_one_of': required_one_of + } + super().__init__(AnsibleModule, ansible_module_params) - if not pd_details: - err_msg = "Unable to find the protection domain with {0}. " \ - "Please enter a valid protection domain" \ - " name/id.".format(name_or_id) - self.module.fail_json(msg=err_msg) + utils.ensure_required_libs(self.module) + self.result = dict( + changed=False, + storage_pool_details={} + ) - except Exception as e: - errormsg = "Failed to get the protection domain {0} with" \ - " error {1}".format(name_or_id, str(e)) - LOG.error(errormsg) - self.module.fail_json(msg=errormsg) + def get_protection_domain( + self, protection_domain_name=None, protection_domain_id=None + ): + """Get the details of a protection domain in a given PowerFlex storage + system""" + return Configuration(self.powerflex_conn, self.module).get_protection_domain( + protection_domain_name=protection_domain_name, protection_domain_id=protection_domain_id) def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None, pd_id=None): @@ -648,7 +816,7 @@ def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None, filter_fields.update({'protectionDomainId': pd_id}) pool_details = self.powerflex_conn.storage_pool.get( filter_fields=filter_fields) - if 
pool_details: + if pool_details != []: if len(pool_details) > 1: err_msg = "More than one storage pool found with {0}," \ @@ -666,10 +834,9 @@ def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None, protection_domain_id=pd_id)['name'] # adding protection domain name in the pool details pool_details['protectionDomainName'] = pd_name - else: - pool_details = None + return pool_details - return pool_details + return None except Exception as e: errormsg = "Failed to get the storage pool {0} with error " \ @@ -698,192 +865,206 @@ def create_storage_pool(self, pool_name, pd_id, media_type, self.module.fail_json( msg="Please provide protection domain details for " "creation of a storage pool") - self.powerflex_conn.storage_pool.create( - media_type=media_type, - protection_domain_id=pd_id, name=pool_name, - use_rfcache=use_rfcache, use_rmcache=use_rmcache) + if not self.module.check_mode: + pool_id = self.powerflex_conn.storage_pool.create( + media_type=media_type, + protection_domain_id=pd_id, name=pool_name, + use_rfcache=use_rfcache, use_rmcache=use_rmcache)['id'] + + return self.get_storage_pool(storage_pool_id=pool_id, + pd_id=pd_id) - return True except Exception as e: errormsg = "Failed to create the storage pool {0} with error " \ "{1}".format(pool_name, str(e)) LOG.error(errormsg) self.module.fail_json(msg=errormsg) - def modify_storage_pool(self, pool_id, modify_dict): - """ - Modify the parameters of the storage pool. - :param modify_dict: Dict containing parameters which are to be - modified - :param pool_id: Id of the pool. - :return: True, if the operation is successful. 
- """ - - try: - - if 'new_name' in modify_dict: - self.powerflex_conn.storage_pool.rename( - pool_id, modify_dict['new_name']) - if 'use_rmcache' in modify_dict: - self.powerflex_conn.storage_pool.set_use_rmcache( - pool_id, modify_dict['use_rmcache']) - if 'use_rfcache' in modify_dict: - self.powerflex_conn.storage_pool.set_use_rfcache( - pool_id, modify_dict['use_rfcache']) - if 'media_type' in modify_dict: - self.powerflex_conn.storage_pool.set_media_type( - pool_id, modify_dict['media_type']) - return True - - except Exception as e: - err_msg = "Failed to update the storage pool {0} with error " \ - "{1}".format(pool_id, str(e)) - LOG.error(err_msg) - self.module.fail_json(msg=err_msg) - - def verify_params(self, pool_details, pd_name, pd_id): + def verify_protection_domain(self, pool_details): """ :param pool_details: Details of the storage pool :param pd_name: Name of the protection domain :param pd_id: Id of the protection domain """ - if pd_id and pd_id != pool_details['protectionDomainId']: - self.module.fail_json(msg="Entered protection domain id does not" - " match with the storage pool's " - "protection domain id. Please enter " - "a correct protection domain id.") - - if pd_name and pd_name != pool_details['protectionDomainName']: - self.module.fail_json(msg="Entered protection domain name does" - " not match with the storage pool's " - "protection domain name. 
Please enter" - " a correct protection domain name.") - - def perform_module_operation(self): - """ Perform different actions on Storage Pool based on user input - in the playbook """ - - pool_name = self.module.params['storage_pool_name'] - pool_id = self.module.params['storage_pool_id'] - pool_new_name = self.module.params['storage_pool_new_name'] - state = self.module.params['state'] pd_name = self.module.params['protection_domain_name'] pd_id = self.module.params['protection_domain_id'] - use_rmcache = self.module.params['use_rmcache'] - use_rfcache = self.module.params['use_rfcache'] - media_type = self.module.params['media_type'] - if media_type == "TRANSITIONAL": - media_type = 'Transitional' - - result = dict( - storage_pool_details={} - ) - changed = False - pd_details = None - if pd_name or pd_id: - pd_details = self.get_protection_domain( - protection_domain_id=pd_id, - protection_domain_name=pd_name) - if pd_details: - pd_id = pd_details['id'] - - if pool_name is not None and (len(pool_name.strip()) == 0): + if pool_details is not None: + if pd_id and pd_id != pool_details['protectionDomainId']: + self.module.fail_json(msg="Entered protection domain id does not" + " match with the storage pool's " + "protection domain id. Please enter " + "a correct protection domain id.") + + if pd_name and pd_name != pool_details['protectionDomainName']: + self.module.fail_json(msg="Entered protection domain name does" + " not match with the storage pool's " + "protection domain name. Please enter" + " a correct protection domain name.") + + def verify_storage_pool_name(self): + if (self.module.params['storage_pool_name'] is not None and + (len(self.module.params['storage_pool_name'].strip()) == 0)) or \ + (self.module.params['storage_pool_new_name'] is not None and + (len(self.module.params['storage_pool_new_name'].strip()) == 0)): self.module.fail_json( - msg="Empty or white spaced string provided in " - "storage_pool_name. 
Please provide valid storage" + msg="Empty or white spaced string provided for " + "storage pool name. Provide valid storage" " pool name.") - # Get the details of the storage pool. - pool_details = self.get_storage_pool(storage_pool_id=pool_id, - storage_pool_name=pool_name, - pd_id=pd_id) - if pool_name and pool_details: - pool_id = pool_details['id'] - self.verify_params(pool_details, pd_name, pd_id) - - # create a storage pool - if state == 'present' and not pool_details: - LOG.info("Creating new storage pool") - if pool_id: - self.module.fail_json( - msg="storage_pool_name is missing & name required to " - "create a storage pool. Please enter a valid " - "storage_pool_name.") - if pool_new_name is not None: - self.module.fail_json( - msg="storage_pool_new_name is passed during creation. " - "storage_pool_new_name is not allowed during " - "creation of a storage pool.") - changed = self.create_storage_pool( - pool_name, pd_id, media_type, use_rfcache, use_rmcache) - if changed: - pool_id = self.get_storage_pool(storage_pool_id=pool_id, - storage_pool_name=pool_name, - pd_id=pd_id)['id'] - - # modify the storage pool parameters - if state == 'present' and pool_details: - # check if the parameters are to be updated or not - if pool_new_name is not None and len(pool_new_name.strip()) == 0: - self.module.fail_json( - msg="Empty/White spaced name is not allowed during " - "renaming of a storage pool. Please enter a valid " - "storage pool new name.") - modify_dict = to_modify(pool_details, use_rmcache, use_rfcache, - pool_new_name, media_type) - if bool(modify_dict): - LOG.info("Modify attributes of storage pool") - changed = self.modify_storage_pool(pool_id, modify_dict) - - # Delete a storage pool - if state == 'absent' and pool_details: - msg = "Deleting storage pool is not supported through" \ - " ansible module." 
- LOG.error(msg) - self.module.fail_json(msg=msg) - - # Show the updated storage pool details - if state == 'present': - pool_details = self.get_storage_pool(storage_pool_id=pool_id) - # fetching Id from pool details to address a case where - # protection domain is not passed - pd_id = pool_details['protectionDomainId'] - pd_name = self.get_protection_domain( - protection_domain_id=pd_id)['name'] - # adding protection domain name in the pool details - pool_details['protectionDomainName'] = pd_name - result['storage_pool_details'] = pool_details - result['changed'] = changed - - self.module.exit_json(**result) - - -def to_modify(pool_details, use_rmcache, use_rfcache, new_name, media_type): - """ - Check whether a parameter is required to be updated. - - :param media_type: Type of the media supported by the pool. - :param pool_details: Details of the storage pool - :param use_rmcache: Enable/Disable RMcache on pool - :param use_rfcache: Enable/Disable RFcache on pool - :param new_name: New name for the storage pool - :return: dict, containing parameters to be modified - """ - pool_name = pool_details['name'] - pool_use_rfcache = pool_details['useRfcache'] - pool_use_rmcache = pool_details['useRmcache'] - pool_media_type = pool_details['mediaType'] - modify_params = {} - - if new_name is not None and pool_name != new_name: - modify_params['new_name'] = new_name - if use_rfcache is not None and pool_use_rfcache != use_rfcache: - modify_params['use_rfcache'] = use_rfcache - if use_rmcache is not None and pool_use_rmcache != use_rmcache: - modify_params['use_rmcache'] = use_rmcache - if media_type is not None and media_type != pool_media_type: - modify_params['media_type'] = media_type - return modify_params + def set_persistent_checksum(self, pool_details, pool_params): + try: + if pool_params['persistent_checksum']['enable']: + if pool_details['persistentChecksumEnabled'] is not True: + self.powerflex_conn.storage_pool.set_persistent_checksum( + 
storage_pool_id=pool_details['id'], + enable=pool_params['persistent_checksum']['enable'], + validate=pool_params['persistent_checksum']['validate_on_read'], + builder_limit=pool_params['persistent_checksum']['builder_limit']) + else: + self.powerflex_conn.storage_pool.modify_persistent_checksum( + storage_pool_id=pool_details['id'], + validate=pool_params['persistent_checksum']['validate_on_read'], + builder_limit=pool_params['persistent_checksum']['builder_limit']) + + pool_details = self.get_storage_pool(storage_pool_id=pool_details['id']) + return pool_details + + except Exception as e: + err_msg = "Failed to set persistent checksum with error " \ + "{0}".format(str(e)) + LOG.error(err_msg) + self.module.fail_json(msg=err_msg) + + def to_modify_persistent_checksum(self, pool_details, pool_params): + checksum_dict = dict() + if pool_params['persistent_checksum']['enable'] is not None and \ + pool_params['persistent_checksum']['enable'] != pool_details['persistentChecksumEnabled']: + checksum_dict['enable'] = pool_params['persistent_checksum']['enable'] + + if pool_params['persistent_checksum']['validate_on_read'] is not None and \ + pool_params['persistent_checksum']['validate_on_read'] != pool_details['persistentChecksumValidateOnRead'] and \ + pool_params['persistent_checksum']['enable'] is True: + checksum_dict['validate_on_read'] = pool_params['persistent_checksum']['validate_on_read'] + + if pool_params['persistent_checksum']['builder_limit'] is not None and \ + pool_params['persistent_checksum']['builder_limit'] != pool_details['persistentChecksumBuilderLimitKb'] and \ + pool_params['persistent_checksum']['enable'] is True: + checksum_dict['builder_limit'] = pool_params['persistent_checksum']['builder_limit'] + + return checksum_dict + + def to_modify_rebalance_io_priority_policy(self, pool_details, pool_params): + + policy_dict = { + 'policy': None, + 'concurrent_ios': None, + 'bw_limit': None + } + modify = False + if 
pool_params['rebalance_io_priority_policy']['policy'] is not None and \ + pool_params['rebalance_io_priority_policy']['policy'] != pool_details['rebalanceIoPriorityPolicy']: + policy_dict['policy'] = pool_params['rebalance_io_priority_policy']['policy'] + modify = True + + if pool_params['rebalance_io_priority_policy']['concurrent_ios_per_device'] is not None and \ + pool_params['rebalance_io_priority_policy']['concurrent_ios_per_device'] != pool_details['rebalanceIoPriorityNumOfConcurrentIosPerDevice']: + policy_dict['concurrent_ios'] = str(pool_params['rebalance_io_priority_policy']['concurrent_ios_per_device']) + + if pool_params['rebalance_io_priority_policy']['bw_limit_per_device'] is not None and \ + pool_params['rebalance_io_priority_policy']['bw_limit_per_device'] != pool_details['rebalanceIoPriorityBwLimitPerDeviceInKbps']: + policy_dict['bw_limit'] = str(pool_params['rebalance_io_priority_policy']['bw_limit_per_device']) + + if policy_dict['policy'] is None and (policy_dict['concurrent_ios'] is not None or policy_dict['bw_limit'] is not None): + policy_dict['policy'] = pool_details['rebalanceIoPriorityPolicy'] + modify = True + + if modify is True: + return policy_dict + else: + return None + + def to_modify_vtree_migration_io_priority_policy(self, pool_details, pool_params): + policy_dict = { + 'policy': None, + 'concurrent_ios': None, + 'bw_limit': None + } + modify = False + if pool_params['vtree_migration_io_priority_policy']['policy'] is not None and \ + pool_params['vtree_migration_io_priority_policy']['policy'] != pool_details['vtreeMigrationIoPriorityPolicy']: + policy_dict['policy'] = pool_params['vtree_migration_io_priority_policy']['policy'] + modify = True + + if pool_params['vtree_migration_io_priority_policy']['concurrent_ios_per_device'] is not None and \ + pool_params['vtree_migration_io_priority_policy']['concurrent_ios_per_device'] != \ + pool_details['vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice']: + policy_dict['concurrent_ios'] 
= str(pool_params['vtree_migration_io_priority_policy']['concurrent_ios_per_device']) + + if pool_params['vtree_migration_io_priority_policy']['bw_limit_per_device'] is not None and \ + pool_params['vtree_migration_io_priority_policy']['bw_limit_per_device'] != \ + pool_details['vtreeMigrationIoPriorityBwLimitPerDeviceInKbps']: + policy_dict['bw_limit'] = str(pool_params['vtree_migration_io_priority_policy']['bw_limit_per_device']) + + if policy_dict['policy'] is None and (policy_dict['concurrent_ios'] is not None or policy_dict['bw_limit'] is not None): + policy_dict['policy'] = pool_details['vtreeMigrationIoPriorityPolicy'] + modify = True + + if modify is True: + return policy_dict + else: + return None + + def to_modify_protected_maintenance_mode_io_priority_policy(self, pool_details, pool_params): + + policy_dict = { + 'policy': None, + 'concurrent_ios': None, + 'bw_limit': None + } + modify = False + if pool_params['protected_maintenance_mode_io_priority_policy']['policy'] is not None and \ + pool_params['protected_maintenance_mode_io_priority_policy']['policy'] != pool_details['protectedMaintenanceModeIoPriorityPolicy']: + policy_dict['policy'] = pool_params['protected_maintenance_mode_io_priority_policy']['policy'] + modify = True + + if pool_params['protected_maintenance_mode_io_priority_policy']['concurrent_ios_per_device'] is not None and \ + pool_params['protected_maintenance_mode_io_priority_policy']['concurrent_ios_per_device'] != \ + pool_details['protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice']: + policy_dict['concurrent_ios'] = str(pool_params['protected_maintenance_mode_io_priority_policy']['concurrent_ios_per_device']) + + if pool_params['protected_maintenance_mode_io_priority_policy']['bw_limit_per_device'] is not None and \ + pool_params['protected_maintenance_mode_io_priority_policy']['bw_limit_per_device'] != \ + pool_details['protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps']: + policy_dict['bw_limit'] = 
str(pool_params['protected_maintenance_mode_io_priority_policy']['bw_limit_per_device']) + + if policy_dict['policy'] is None and (policy_dict['concurrent_ios'] is not None or policy_dict['bw_limit'] is not None): + policy_dict['policy'] = pool_details['protectedMaintenanceModeIoPriorityPolicy'] + modify = True + + if modify is True: + return policy_dict + else: + return None + + def to_modify_capacity_alert_thresholds(self, pool_details, pool_params, thresholds): + modify = False + threshold = dict() + if pool_params['cap_alert_thresholds']['high_threshold'] is not None and pool_params['cap_alert_thresholds'][ + 'high_threshold'] != pool_details['capacityAlertHighThreshold']: + threshold['high'] = str(pool_params['cap_alert_thresholds']['high_threshold']) + modify = True + if pool_params['cap_alert_thresholds']['critical_threshold'] is not None and \ + pool_params['cap_alert_thresholds']['critical_threshold'] != pool_details[ + 'capacityAlertCriticalThreshold']: + threshold['critical'] = str(pool_params['cap_alert_thresholds']['critical_threshold']) + modify = True + if modify is True: + if 'high' not in threshold: + threshold['high'] = str(pool_details['capacityAlertHighThreshold']) + if 'critical' not in threshold: + threshold['critical'] = str(pool_details['capacityAlertCriticalThreshold']) + + return threshold def get_powerflex_storagepool_parameters(): @@ -898,15 +1079,464 @@ def get_powerflex_storagepool_parameters(): choices=['HDD', 'SSD', 'TRANSITIONAL']), use_rfcache=dict(required=False, type='bool'), use_rmcache=dict(required=False, type='bool'), + enable_zero_padding=dict(type='bool'), + rep_cap_max_ratio=dict(type='int'), + rmcache_write_handling_mode=dict(choices=['Cached', 'Passthrough'], default='Cached'), + spare_percentage=dict(type='int'), + enable_rebalance=dict(type='bool'), + enable_fragmentation=dict(type='bool'), + enable_rebuild=dict(type='bool'), storage_pool_new_name=dict(required=False, type='str'), + 
parallel_rebuild_rebalance_limit=dict(type='int'), + cap_alert_thresholds=dict(type='dict', options=dict( + high_threshold=dict(type='int'), + critical_threshold=dict(type='int'))), + protected_maintenance_mode_io_priority_policy=dict(type='dict', options=dict( + policy=dict(choices=['unlimited', 'limitNumOfConcurrentIos', 'favorAppIos'], default='limitNumOfConcurrentIos'), + concurrent_ios_per_device=dict(type='int'), + bw_limit_per_device=dict(type='int'))), + rebalance_io_priority_policy=dict(type='dict', options=dict( + policy=dict(choices=['unlimited', 'limitNumOfConcurrentIos', 'favorAppIos'], default='favorAppIos'), + concurrent_ios_per_device=dict(type='int'), + bw_limit_per_device=dict(type='int'))), + vtree_migration_io_priority_policy=dict(type='dict', options=dict( + policy=dict(choices=['limitNumOfConcurrentIos', 'favorAppIos']), + concurrent_ios_per_device=dict(type='int'), + bw_limit_per_device=dict(type='int'))), + persistent_checksum=dict(type='dict', options=dict( + enable=dict(type='bool'), + validate_on_read=dict(type='bool'), + builder_limit=dict(type='int', default=3072))), state=dict(required=True, type='str', choices=['present', 'absent'])) +class StoragePoolExitHandler(): + def handle(self, pool_obj, pool_details): + if pool_details: + pool_details = pool_obj.get_storage_pool(storage_pool_id=pool_details['id']) + pool_obj.result['storage_pool_details'] = pool_details + + pool_obj.module.exit_json(**pool_obj.result) + + +class StoragePoolDeleteHandler(): + def handle(self, pool_obj, pool_params, pool_details): + if pool_params['state'] == 'absent' and pool_details: + msg = "Deleting storage pool is not supported through" \ + " ansible module." 
+ LOG.error(msg) + pool_obj.module.fail_json(msg=msg) + + StoragePoolExitHandler().handle(pool_obj, pool_details) + + +class StoragePoolModifyPersistentChecksumHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['persistent_checksum'] is not None: + checksum_dict = pool_obj.to_modify_persistent_checksum( + pool_details=pool_details, + pool_params=pool_params) + if checksum_dict != {}: + if not pool_obj.module.check_mode: + pool_details = pool_obj.set_persistent_checksum( + pool_details=pool_details, + pool_params=pool_params) + pool_obj.result['changed'] = True + + StoragePoolDeleteHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Persistent Checksum failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyRebalanceIOPriorityPolicyHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['rebalance_io_priority_policy'] is not None: + policy_dict = pool_obj.to_modify_rebalance_io_priority_policy( + pool_details=pool_details, + pool_params=pool_params + ) + if policy_dict is not None: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.rebalance_io_priority_policy( + storage_pool_id=pool_details['id'], + policy=policy_dict['policy'], + concurrent_ios_per_device=policy_dict['concurrent_ios'], + bw_limit_per_device=policy_dict['bw_limit']) + pool_obj.result['changed'] = True + + StoragePoolModifyPersistentChecksumHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify rebalance IO Priority Policy failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolSetVtreeMigrationIOPriorityPolicyHandler(): + def 
handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['vtree_migration_io_priority_policy'] is not None: + policy_dict = pool_obj.to_modify_vtree_migration_io_priority_policy( + pool_details=pool_details, + pool_params=pool_params + ) + if policy_dict is not None: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_vtree_migration_io_priority_policy( + storage_pool_id=pool_details['id'], + policy=policy_dict['policy'], + concurrent_ios_per_device=policy_dict['concurrent_ios'], + bw_limit_per_device=policy_dict['bw_limit']) + pool_obj.result['changed'] = True + + StoragePoolModifyRebalanceIOPriorityPolicyHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Set Vtree Migration I/O Priority Policy operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolSetProtectedMaintenanceModeIOPriorityPolicyHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['protected_maintenance_mode_io_priority_policy'] is not None: + policy_dict = pool_obj.to_modify_protected_maintenance_mode_io_priority_policy( + pool_details=pool_details, + pool_params=pool_params + ) + if policy_dict is not None: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_protected_maintenance_mode_io_priority_policy( + storage_pool_id=pool_details['id'], + policy=policy_dict['policy'], + concurrent_ios_per_device=policy_dict['concurrent_ios'], + bw_limit_per_device=policy_dict['bw_limit']) + pool_obj.result['changed'] = True + + StoragePoolSetVtreeMigrationIOPriorityPolicyHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Set Protected Maintenance Mode IO Priority Policy operation failed " + 
f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyCapacityAlertThresholdsHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['cap_alert_thresholds'] is not None: + threshold = pool_obj.to_modify_capacity_alert_thresholds(pool_details=pool_details, + pool_params=pool_params, + thresholds=pool_params[ + 'cap_alert_thresholds']) + if threshold != {}: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_cap_alert_thresholds( + storage_pool_id=pool_details['id'], + cap_alert_high_threshold=threshold['high'], + cap_alert_critical_threshold=threshold['critical']) + pool_obj.result['changed'] = True + + StoragePoolSetProtectedMaintenanceModeIOPriorityPolicyHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Capacity Alert Thresholds operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyRebuildRebalanceParallelismLimitHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['parallel_rebuild_rebalance_limit'] is not None and \ + pool_params['parallel_rebuild_rebalance_limit'] != pool_details['numOfParallelRebuildRebalanceJobsPerDevice']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rebuild_rebalance_parallelism_limit( + pool_details['id'], str(pool_params['parallel_rebuild_rebalance_limit'])) + pool_obj.result['changed'] = True + + StoragePoolModifyCapacityAlertThresholdsHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Rebuild/Rebalance Parallelism Limit operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + 
pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyRMCacheWriteHandlingModeHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['rmcache_write_handling_mode'] is not None and \ + pool_params['rmcache_write_handling_mode'] != pool_details['rmcacheWriteHandlingMode']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rmcache_write_handling_mode( + pool_details['id'], pool_params['rmcache_write_handling_mode']) + pool_obj.result['changed'] = True + + StoragePoolModifyRebuildRebalanceParallelismLimitHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify RMCache Write Handling Mode failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifySparePercentageHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['spare_percentage'] is not None and pool_params['spare_percentage'] != pool_details['sparePercentage']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_spare_percentage( + pool_details['id'], str(pool_params['spare_percentage'])) + pool_obj.result['changed'] = True + + StoragePoolModifyRMCacheWriteHandlingModeHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Spare Percentage operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolEnableFragmentationHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['enable_fragmentation'] is not None and pool_params['enable_fragmentation'] != pool_details['fragmentationEnabled']: + if not 
pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_fragmentation_enabled( + pool_details['id'], pool_params['enable_fragmentation']) + pool_obj.result['changed'] = True + + StoragePoolModifySparePercentageHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + + error_msg = (f"Enable/Disable Fragmentation operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolEnableRebuildHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['enable_rebuild'] is not None and pool_params['enable_rebuild'] != pool_details['rebuildEnabled']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rebuild_enabled( + pool_details['id'], pool_params['enable_rebuild']) + pool_obj.result['changed'] = True + + StoragePoolEnableFragmentationHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Enable/Disable Rebuild operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolEnableRebalanceHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['enable_rebalance'] is not None and pool_params['enable_rebalance'] != pool_details['rebalanceEnabled']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rebalance_enabled( + pool_details['id'], pool_params['enable_rebalance']) + pool_obj.result['changed'] = True + + StoragePoolEnableRebuildHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Enable/Disable Rebalance failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class 
StoragePoolModifyRepCapMaxRatioHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['rep_cap_max_ratio'] is not None and pool_params['rep_cap_max_ratio'] != pool_details['replicationCapacityMaxRatio']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_rep_cap_max_ratio( + pool_details['id'], str(pool_params['rep_cap_max_ratio'])) + pool_obj.result['changed'] = True + + StoragePoolEnableRebalanceHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Replication Capacity max ratio operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolEnableZeroPaddingHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['enable_zero_padding'] is not None and pool_params['enable_zero_padding'] != pool_details['zeroPaddingEnabled']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_zero_padding_policy( + pool_details['id'], pool_params['enable_zero_padding']) + pool_obj.result['changed'] = True + + StoragePoolModifyRepCapMaxRatioHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Enable/Disable zero padding operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolUseRFCacheHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['use_rfcache'] is not None and pool_params['use_rfcache'] != pool_details['useRfcache']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_use_rfcache( + pool_details['id'], pool_params['use_rfcache']) + 
pool_obj.result['changed'] = True + + StoragePoolEnableZeroPaddingHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify RF cache operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolUseRMCacheHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['use_rmcache'] is not None and pool_params['use_rmcache'] != pool_details['useRmcache']: + if not pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_use_rmcache( + pool_details['id'], pool_params['use_rmcache']) + pool_obj.result['changed'] = True + + StoragePoolUseRFCacheHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify RM cache operation failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolRenameHandler(): + def handle(self, pool_obj, pool_params, pool_details): + try: + if pool_params['state'] == 'present' and pool_details: + if pool_params['storage_pool_new_name'] is not None and pool_params['storage_pool_new_name'] != pool_details['name']: + if not pool_obj.module.check_mode: + pool_obj.powerflex_conn.storage_pool.rename(pool_details['id'], pool_params['storage_pool_new_name']) + pool_obj.result['changed'] = True + + StoragePoolUseRMCacheHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify storage pool name failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolModifyMediaTypeHandler(): + def handle(self, pool_obj, pool_params, pool_details, media_type): + try: + if pool_params['state'] == 'present' and pool_details: + if media_type is not None and media_type != pool_details['mediaType']: + if not 
pool_obj.module.check_mode: + pool_details = pool_obj.powerflex_conn.storage_pool.set_media_type( + pool_details['id'], media_type) + pool_obj.result['changed'] = True + + StoragePoolRenameHandler().handle(pool_obj, pool_params, pool_details) + + except Exception as e: + error_msg = (f"Modify Media Type failed " + f"with error {str(e)}") + LOG.error(error_msg) + pool_obj.module.fail_json(msg=error_msg) + + +class StoragePoolCreateHandler(): + def handle(self, pool_obj, pool_params, pool_details, pd_id, media_type): + if pool_params['state'] == 'present' and pool_details is None: + if not pool_obj.module.check_mode: + LOG.info("Creating new storage pool") + if pool_params['storage_pool_id']: + self.module.fail_json( + msg="storage_pool_name is missing & name required to " + "create a storage pool. Please enter a valid " + "storage_pool_name.") + + pool_details = pool_obj.create_storage_pool( + pool_name=pool_params['storage_pool_name'], + pd_id=pd_id, + media_type=media_type, + use_rfcache=pool_params['use_rfcache'], + use_rmcache=pool_params['use_rmcache']) + + pool_obj.result['changed'] = True + + StoragePoolModifyMediaTypeHandler().handle(pool_obj, pool_params, pool_details, media_type) + + +class StoragePoolHandler(): + def handle(self, pool_obj, pool_params): + pool_obj.verify_storage_pool_name() + media_type = pool_params['media_type'] + if media_type == "TRANSITIONAL": + media_type = 'Transitional' + pd_id = None + if pool_params['protection_domain_id'] or pool_params['protection_domain_name']: + pd_id = pool_obj.get_protection_domain( + protection_domain_id=pool_params['protection_domain_id'], + protection_domain_name=pool_params['protection_domain_name'])['id'] + pool_details = pool_obj.get_storage_pool(storage_pool_id=pool_params['storage_pool_id'], + storage_pool_name=pool_params['storage_pool_name'], + pd_id=pd_id) + pool_obj.verify_protection_domain(pool_details=pool_details) + StoragePoolCreateHandler().handle(pool_obj, pool_params, pool_details, 
pd_id, media_type) + + def main(): - """ Create PowerFlex Storage Pool object and perform action on it + """ Create PowerFlex storage pool object and perform action on it based on user input from playbook""" obj = PowerFlexStoragePool() - obj.perform_module_operation() + StoragePoolHandler().handle(obj, obj.module.params) if __name__ == '__main__': diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index 5714021..438f0a7 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -43,3 +43,5 @@ plugins/modules/replication_consistency_group.py compile-3.5 plugins/modules/resource_group.py validate-modules:missing-gplv3-license plugins/modules/resource_group.py compile-2.7 plugins/modules/resource_group.py import-2.7 +plugins/modules/storagepool.py compile-2.7 +plugins/modules/storagepool.py import-2.7 diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt index 5714021..438f0a7 100644 --- a/tests/sanity/ignore-2.15.txt +++ b/tests/sanity/ignore-2.15.txt @@ -43,3 +43,5 @@ plugins/modules/replication_consistency_group.py compile-3.5 plugins/modules/resource_group.py validate-modules:missing-gplv3-license plugins/modules/resource_group.py compile-2.7 plugins/modules/resource_group.py import-2.7 +plugins/modules/storagepool.py compile-2.7 +plugins/modules/storagepool.py import-2.7 diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt index 0dbde68..5f24f9f 100644 --- a/tests/sanity/ignore-2.16.txt +++ b/tests/sanity/ignore-2.16.txt @@ -29,3 +29,5 @@ plugins/modules/info.py import-2.7 plugins/modules/resource_group.py validate-modules:missing-gplv3-license plugins/modules/resource_group.py compile-2.7 plugins/modules/resource_group.py import-2.7 +plugins/modules/storagepool.py compile-2.7 +plugins/modules/storagepool.py import-2.7 diff --git a/tests/unit/plugins/module_utils/mock_storagepool_api.py b/tests/unit/plugins/module_utils/mock_storagepool_api.py index 87af1d6..3f0a895 100644 --- 
a/tests/unit/plugins/module_utils/mock_storagepool_api.py +++ b/tests/unit/plugins/module_utils/mock_storagepool_api.py @@ -22,13 +22,44 @@ class MockStoragePoolApi: "use_rmcache": None, "use_rfcache": None, "media_type": None, + "enable_zero_padding": None, + "rep_cap_max_ratio": None, + "rmcache_write_handling_mode": None, + "spare_percentage": None, + "enable_rebalance": None, + "enable_fragmentation": None, + "enable_rebuild": None, + "parallel_rebuild_rebalance_limit": None, + "cap_alert_thresholds": { + "high_threshold": 30, + "critical_threshold": 50 + }, + "protected_maintenance_mode_io_priority_policy": { + "policy": None, + "concurrent_ios_per_device": None, + "bw_limit_per_device": None + }, + "rebalance_io_priority_policy": { + "policy": None, + "concurrent_ios_per_device": None, + "bw_limit_per_device": None + }, + "vtree_migration_io_priority_policy": { + "policy": None, + "concurrent_ios_per_device": None, + "bw_limit_per_device": None}, + "persistent_checksum": { + "enable": None, + "validate_on_read": None, + "builder_limit": None + }, 'state': None } STORAGE_POOL_GET_LIST = [ { - 'protectionDomainId': '4eeb304600000000', - 'protectionDomainName': 'test_pd', + 'protectionDomainId': "7bd6457000000000", + 'protectionDomainName': "test_pd_1", 'rebuildEnabled': True, 'dataLayout': 'MediumGranularity', 'persistentChecksumState': 'Protected', @@ -98,8 +129,8 @@ class MockStoragePoolApi: STORAGE_POOL_GET_MULTI_LIST = [ { - 'protectionDomainId': '4eeb304600000000', - 'protectionDomainName': 'test_pd', + 'protectionDomainId': "7bd6457000000000", + 'protectionDomainName': "test_pd_1", 'rebuildEnabled': True, 'dataLayout': 'MediumGranularity', 'persistentChecksumState': 'Protected', @@ -166,7 +197,7 @@ class MockStoragePoolApi: 'id': 'test_pool_id_1' }, { - 'protectionDomainId': '4eeb304600000002', + 'protectionDomainId': "7bd6457000000000", 'protectionDomainName': 'test_pd_1', 'rebuildEnabled': True, 'dataLayout': 'MediumGranularity', @@ -239,6 +270,30 @@ 
class MockStoragePoolApi: PROTECTION_DETAILS_1 = [{"id": "4eeb304600000001", "name": "test_pd_name"}] + PROTECTION_DOMAIN = { + "protectiondomain": [ + { + "id": "7bd6457000000000", + "name": "test_pd_1", + "protectionDomainState": "Active", + "overallIoNetworkThrottlingInKbps": 20480, + "rebalanceNetworkThrottlingInKbps": 10240, + "rebuildNetworkThrottlingInKbps": 10240, + "vtreeMigrationNetworkThrottlingInKbps": 10240, + "rfcacheEnabled": "false", + "rfcacheMaxIoSizeKb": 128, + "rfcacheOpertionalMode": "None", + "rfcachePageSizeKb": 64, + "storagePools": [ + { + "id": "8d1cba1700000000", + "name": "pool1" + } + ] + } + ] + } + STORAGE_POOL_STATISTICS = { 'backgroundScanFixedReadErrorCount': 0, 'pendingMovingOutBckRebuildJobs': 0, @@ -616,10 +671,26 @@ class MockStoragePoolApi: "get_multi_details": "More than one storage pool found", "create_wo_pd": "Please provide protection domain details", "create_transitional": "TRANSITIONAL media type is not supported during creation.", - "create_pool_name_empty": "Empty or white spaced string provided in storage_pool_name.", + "create_pool_name_empty": "Empty or white spaced string provided for storage pool name. Provide valid storage pool name", "create_pool_new_name": "storage_pool_new_name is passed during creation.", - "rename_storage_pool_empty": "Empty/White spaced name is not allowed during renaming of a storage pool.", - "delete_storage_pool": "Deleting storage pool is not supported through ansible module." + "rename_storage_pool_empty": "Empty or white spaced string provided for storage pool name. 
Provide valid storage pool name", + "delete_storage_pool": "Deleting storage pool is not supported through ansible module.", + "rename_pool": "Modify storage pool name failed", + "modify_pool_rmcache": "Modify RM cache operation failed", + "modify_pool_rfcache": "Modify RF cache operation failed", + "modify_pool_zero_padding_enabled": "Enable/Disable zero padding operation failed", + "modify_pool_rep_cap_max_ratio": "Modify Replication Capacity max ratio operation failed", + "modify_pool_enable_rebalance": "Enable/Disable Rebalance failed", + "modify_pool_enable_rebuild": "Enable/Disable Rebuild operation failed", + "modify_pool_enable_fragmentation": "Enable/Disable Fragmentation operation failed", + "modify_pool_spare_percentage": "Modify Spare Percentage operation failed", + "modify_pool_rmcache_write_handling_mode": "Modify RMCache Write Handling Mode failed", + "modify_pool_rebuild_rebalance_parallelism_limit": "Modify Rebuild/Rebalance Parallelism Limit operation failed", + "modify_pool_capacity_alert_thresholds": "Modify Capacity Alert Thresholds operation failed", + "modify_pool_protected_maintenance_mode_io_priority_policy": "Set Protected Maintenance Mode IO Priority Policy operation failed", + "modify_pool_vtree_migration_io_priority_policy": "Set Vtree Migration I/O Priority Policy operation failed", + "modify_pool_rebalance_io_priority_policy": "Modify rebalance IO Priority Policy failed", + "modify_pool_persistent_checksum": "Modify Persistent Checksum failed" } @staticmethod diff --git a/tests/unit/plugins/modules/test_storagepool.py b/tests/unit/plugins/modules/test_storagepool.py index 6780ed7..c60e1fd 100644 --- a/tests/unit/plugins/modules/test_storagepool.py +++ b/tests/unit/plugins/modules/test_storagepool.py @@ -4,29 +4,26 @@ """Unit Tests for storage pool module on PowerFlex""" + from __future__ import (absolute_import, division, print_function) __metaclass__ = type - import pytest -from mock.mock import MagicMock # pylint: 
disable=unused-import from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock -from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api \ + import MockStoragePoolApi from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ import MockApiException -from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ - import utils from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries.powerflex_unit_base \ import PowerFlexUnitBase - -utils.get_logger = MagicMock() -utils.get_powerflex_gateway_host_connection = MagicMock() -utils.PowerFlexClient = MagicMock() - -from ansible.module_utils import basic -basic.AnsibleModule = MagicMock() -from ansible_collections.dellemc.powerflex.plugins.modules.storagepool import PowerFlexStoragePool +from ansible_collections.dellemc.powerflex.plugins.modules.storagepool \ + import PowerFlexStoragePool +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils +from ansible_collections.dellemc.powerflex.plugins.modules.storagepool import \ + StoragePoolHandler class TestPowerflexStoragePool(PowerFlexUnitBase): @@ -37,23 +34,22 @@ class TestPowerflexStoragePool(PowerFlexUnitBase): def module_object(self): return PowerFlexStoragePool - def test_get_storagepool_details(self, powerflex_module_mock): - self.get_module_args.update({ - "storage_pool_name": "test_pool", - "state": "present" - }) - powerflex_module_mock.module.params = self.get_module_args - storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + def test_get_storage_pool_response(self, powerflex_module_mock): + self.set_module_params( + powerflex_module_mock, + self.get_module_args, + { + "storage_pool_name": 
"test_pool", + "state": "present" + }) powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( - return_value=storagepool_resp - ) - storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS - powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( - return_value=storagepool_statistics_resp - ) - powerflex_module_mock.perform_module_operation() + return_value=MockStoragePoolApi.STORAGE_POOL_GET_LIST) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + StoragePoolHandler().handle( + powerflex_module_mock, powerflex_module_mock.module.params) powerflex_module_mock.powerflex_conn.storage_pool.get.assert_called() - powerflex_module_mock.powerflex_conn.storage_pool.get_statistics.assert_called() def test_get_storagepool_details_multi(self, powerflex_module_mock): self.get_module_args.update({ @@ -71,7 +67,7 @@ def test_get_storagepool_details_multi(self, powerflex_module_mock): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('get_multi_details'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_get_storagepool_details_with_exception(self, powerflex_module_mock): self.get_module_args.update({ @@ -88,50 +84,7 @@ def test_get_storagepool_details_with_exception(self, powerflex_module_mock): powerflex_module_mock.create_storage_pool = MagicMock(return_value=None) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('get_details'), - powerflex_module_mock, invoke_perform_module=True) - - @pytest.mark.parametrize("params", [ - {"pd_id": "4eeb304600000000"}, - {"pd_name": "test"}, - ]) - def test_get_protection_domain(self, powerflex_module_mock, params): - pd_id = params.get("pd_id", None) - pd_name = params.get("pd_name", None) - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - 
return_value=MockStoragePoolApi.PROTECTION_DETAILS - ) - pd_details = powerflex_module_mock.get_protection_domain(pd_name, pd_id) - assert MockStoragePoolApi.PROTECTION_DETAILS[0] == pd_details - - def test_get_protection_domain_exception(self, powerflex_module_mock): - self.set_module_params( - powerflex_module_mock, - self.get_module_args, - { - "storage_pool_name": "test_pool", - "protection_domain_id": "4eeb304600000001", - "state": "present" - }) - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - side_effect=MockApiException) - self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('get_pd_exception'), - powerflex_module_mock, invoke_perform_module=True) - - def test_get_protection_domain_non_exist(self, powerflex_module_mock): - self.set_module_params( - powerflex_module_mock, - self.get_module_args, - { - "storage_pool_name": "test_pool", - "protection_domain_id": "4eeb304600000001", - "state": "present" - }) - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - return_value=None) - self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('get_pd_non_exist'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_get_storagepool_details_with_invalid_pd_id(self, powerflex_module_mock): self.get_module_args.update({ @@ -152,78 +105,33 @@ def test_get_storagepool_details_with_invalid_pd_id(self, powerflex_module_mock) ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('invalid_pd_id'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_create_storagepool_response(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "protection_domain_name": "test_pd_name", + "protection_domain_name": "test_pd_1", "media_type": "HDD", "state": "present" }) powerflex_module_mock.module.params = self.get_module_args + pd_resp = 
MockStoragePoolApi.PROTECTION_DOMAIN powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - return_value=MockStoragePoolApi.PROTECTION_DETAILS_1) + return_value=pd_resp['protectiondomain']) powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=[] ) powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=[] ) - powerflex_module_mock.powerflex_conn.storage_pool.create = MagicMock( - return_value=None - ) - resp = powerflex_module_mock.create_storage_pool(pool_name="test_pool", - pd_id=MockStoragePoolApi.PROTECTION_DETAILS_1[0]['id'], - media_type="HDD") - assert resp is True + StoragePoolHandler().handle( + powerflex_module_mock, powerflex_module_mock.module.params) powerflex_module_mock.powerflex_conn.storage_pool.create.assert_called() - def test_create_storagepool_only_pool_id(self, powerflex_module_mock): - self.get_module_args.update({ - "storage_pool_id": "test_pool_id", - "protection_domain_name": "test_pd_name", - "media_type": "HDD", - "state": "present" - }) - powerflex_module_mock.module.params = self.get_module_args - powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( - return_value=MockStoragePoolApi.PROTECTION_DETAILS_1) - powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( - return_value=[] - ) - powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( - return_value=[] - ) - self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('create_pool_id'), - powerflex_module_mock, invoke_perform_module=True) - - def test_create_storagepool_new_name(self, powerflex_module_mock): - self.get_module_args.update({ - "storage_pool_name": "test_pool", - "storage_pool_new_name": "pool_new_name", - "protection_domain_name": "test_pd_name", - "media_type": "HDD", - "state": "present" - }) - powerflex_module_mock.module.params = self.get_module_args - powerflex_module_mock.powerflex_conn.protection_domain.get = 
MagicMock( - return_value=MockStoragePoolApi.PROTECTION_DETAILS_1) - powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( - return_value=[] - ) - powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( - return_value=[] - ) - self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('create_pool_new_name'), - powerflex_module_mock, invoke_perform_module=True) - def test_create_storagepool_empty_name(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": " ", - "protection_domain_name": "test_pd_name", + "protection_domain_name": "test_pd_1", "media_type": "HDD", "state": "present" }) @@ -232,7 +140,7 @@ def test_create_storagepool_empty_name(self, powerflex_module_mock): return_value=MockStoragePoolApi.PROTECTION_DETAILS_1) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('create_pool_name_empty'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_create_storagepool_wo_pd(self, powerflex_module_mock): self.get_module_args.update({ @@ -251,12 +159,12 @@ def test_create_storagepool_wo_pd(self, powerflex_module_mock): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('create_wo_pd'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_create_storagepool_transitional_exception(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "protection_domain_name": "test_pd_name", + "protection_domain_name": "test_pd_1", "media_type": "TRANSITIONAL", "state": "present" }) @@ -274,7 +182,7 @@ def test_create_storagepool_transitional_exception(self, powerflex_module_mock): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('create_transitional'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_create_storagepool_exception(self, 
powerflex_module_mock): self.get_module_args.update({ @@ -297,15 +205,43 @@ def test_create_storagepool_exception(self, powerflex_module_mock): ) self.capture_fail_json_call( MockStoragePoolApi.get_exception_response('create_storage_pool'), - powerflex_module_mock, invoke_perform_module=True) + powerflex_module_mock, StoragePoolHandler) def test_modify_storagepool_details(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", "storage_pool_new_name": "new_ansible_pool", "use_rfcache": True, "use_rmcache": True, - "media_type": "TRANSITIONAL", + "cap_alert_thresholds": { + "high_threshold": 30, + "critical_threshold": 50 + }, + "enable_zero_padding": True, + "rep_cap_max_ratio": 40, + "rmcache_write_handling_mode": "Passthrough", + "spare_percentage": 80, + "enable_rebalance": False, + "enable_fragmentation": False, + "enable_rebuild": False, + "parallel_rebuild_rebalance_limit": 3, + "protected_maintenance_mode_io_priority_policy": { + "policy": "unlimited", + "concurrent_ios_per_device": 1, + "bw_limit_per_device": 1024}, + "rebalance_io_priority_policy": { + "policy": "limitNumOfConcurrentIos", + "concurrent_ios_per_device": 10, + "bw_limit_per_device": 1024}, + "vtree_migration_io_priority_policy": { + "policy": "limitNumOfConcurrentIos", + "concurrent_ios_per_device": 10, + "bw_limit_per_device": 1024}, + "persistent_checksum": { + "enable": True, + "validate_on_read": True, + "builder_limit": 1024}, "state": "present" }) powerflex_module_mock.module.params = self.get_module_args @@ -313,20 +249,58 @@ def test_modify_storagepool_details(self, powerflex_module_mock): powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=storagepool_resp ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) storagepool_statistics_resp = 
MockStoragePoolApi.STORAGE_POOL_STATISTICS powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=storagepool_statistics_resp ) - powerflex_module_mock.perform_module_operation() + StoragePoolHandler().handle( + powerflex_module_mock, powerflex_module_mock.module.params) powerflex_module_mock.powerflex_conn.storage_pool.rename.assert_called() powerflex_module_mock.powerflex_conn.storage_pool.set_use_rmcache.assert_called() powerflex_module_mock.powerflex_conn.storage_pool.set_use_rfcache.assert_called() - powerflex_module_mock.powerflex_conn.storage_pool.set_media_type.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_fragmentation_enabled.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_persistent_checksum.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rebuild_rebalance_parallelism_limit.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rmcache_write_handling_mode.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.rebalance_io_priority_policy.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_vtree_migration_io_priority_policy.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_protected_maintenance_mode_io_priority_policy.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_cap_alert_thresholds.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_zero_padding_policy.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_spare_percentage.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rebuild_enabled.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rebalance_enabled.assert_called() + powerflex_module_mock.powerflex_conn.storage_pool.set_rep_cap_max_ratio.assert_called() - def test_rename_storagepool_exception(self, powerflex_module_mock): + def 
test_delete_storagepool_exception(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "storage_pool_new_name": "new_ansible_pool", + "state": "absent" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('delete_storage_pool'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_name_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "storage_pool_new_name": "test_pool_new", "state": "present" }) powerflex_module_mock.module.params = self.get_module_args @@ -334,6 +308,9 @@ def test_rename_storagepool_exception(self, powerflex_module_mock): powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=storagepool_resp ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=storagepool_statistics_resp @@ -342,13 +319,14 @@ def test_rename_storagepool_exception(self, powerflex_module_mock): side_effect=MockApiException ) self.capture_fail_json_call( - 
MockStoragePoolApi.get_exception_response('rename_storage_pool'), - powerflex_module_mock, invoke_perform_module=True) + MockStoragePoolApi.get_exception_response('rename_pool'), + powerflex_module_mock, StoragePoolHandler) - def test_rename_storagepool_empty_exception(self, powerflex_module_mock): + def test_modify_rmcache_exception(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "storage_pool_new_name": " ", + "protection_domain_name": "test_pd_1", + "use_rmcache": True, "state": "present" }) powerflex_module_mock.module.params = self.get_module_args @@ -356,28 +334,395 @@ def test_rename_storagepool_empty_exception(self, powerflex_module_mock): powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=storagepool_resp ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=storagepool_statistics_resp ) + powerflex_module_mock.powerflex_conn.storage_pool.set_use_rmcache = MagicMock( + side_effect=MockApiException + ) self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('rename_storage_pool_empty'), - powerflex_module_mock, invoke_perform_module=True) + MockStoragePoolApi.get_exception_response('modify_pool_rmcache'), + powerflex_module_mock, StoragePoolHandler) - def test_delete_storagepool_exception(self, powerflex_module_mock): + def test_modify_rfcache_exception(self, powerflex_module_mock): self.get_module_args.update({ "storage_pool_name": "test_pool", - "state": "absent" + "protection_domain_name": "test_pd_1", + "use_rfcache": True, + "state": "present" }) powerflex_module_mock.module.params = self.get_module_args storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST 
powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( return_value=storagepool_resp ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( return_value=storagepool_statistics_resp ) + powerflex_module_mock.powerflex_conn.storage_pool.set_use_rfcache = MagicMock( + side_effect=MockApiException + ) self.capture_fail_json_call( - MockStoragePoolApi.get_exception_response('delete_storage_pool'), - powerflex_module_mock, invoke_perform_module=True) + MockStoragePoolApi.get_exception_response('modify_pool_rfcache'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_enable_zero_padding_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "enable_zero_padding": False, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_zero_padding_policy = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_enable_zero_padding'), + powerflex_module_mock, StoragePoolHandler) + + def 
test_modify_rep_cap_max_ratio_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "rep_cap_max_ratio": 10, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_rep_cap_max_ratio = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_rep_cap_max_ratio'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_enable_rebalance_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "enable_rebalance": False, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + 
powerflex_module_mock.powerflex_conn.storage_pool.set_rebalance_enabled = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_enable_rebalance'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_enable_rebuild_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "enable_rebuild": False, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_rebuild_enabled = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_enable_rebuild'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_enable_fragmentation_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "enable_fragmentation": False, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + 
return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_fragmentation_enabled = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_enable_fragmentation'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_spare_percentage_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "spare_percentage": 20, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_spare_percentage = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_spare_percentage'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_rmcache_write_handling_mode_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "rmcache_write_handling_mode": "Cached", + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = 
MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_rmcache_write_handling_mode = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_rmcache_write_handling_mode'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_rebuild_rebalance_parallelism_limit_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "parallel_rebuild_rebalance_limit": 4, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_rebuild_rebalance_parallelism_limit = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_rebuild_rebalance_parallelism_limit'), + powerflex_module_mock, StoragePoolHandler) + + def 
test_modify_capacity_alert_thresholds_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "cap_alert_thresholds": { + "high_threshold": 60, + "critical_threshold": 70 + }, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_cap_alert_thresholds = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_capacity_alert_thresholds'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_protected_maintenance_mode_io_priority_policy_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "protected_maintenance_mode_io_priority_policy": { + "policy": "unlimited", + "concurrent_ios_per_device": 1, + "bw_limit_per_device": 1024}, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + 
storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_protected_maintenance_mode_io_priority_policy = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_protected_maintenance_mode_io_priority_policy'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_vtree_migration_io_priority_policy_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "vtree_migration_io_priority_policy": { + "policy": "favorAppIos", + "concurrent_ios_per_device": 1, + "bw_limit_per_device": 1024}, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.set_vtree_migration_io_priority_policy = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_vtree_migration_io_priority_policy'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_rebalance_io_priority_policy_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": 
"test_pd_1", + "rebalance_io_priority_policy": { + "policy": "favorAppIos", + "concurrent_ios_per_device": 1, + "bw_limit_per_device": 1024}, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + powerflex_module_mock.powerflex_conn.storage_pool.rebalance_io_priority_policy = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_rebalance_io_priority_policy'), + powerflex_module_mock, StoragePoolHandler) + + def test_modify_persistent_checksum_exception(self, powerflex_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "protection_domain_name": "test_pd_1", + "persistent_checksum": { + "enable": True, + "validate_on_read": True, + "builder_limit": 1024}, + "state": "present" + }) + powerflex_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + pd_resp = MockStoragePoolApi.PROTECTION_DOMAIN + powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock( + return_value=pd_resp['protectiondomain']) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + 
powerflex_module_mock.powerflex_conn.storage_pool.set_persistent_checksum = MagicMock( + side_effect=MockApiException + ) + self.capture_fail_json_call( + MockStoragePoolApi.get_exception_response('modify_pool_persistent_checksum'), + powerflex_module_mock, StoragePoolHandler)