diff --git a/examples/tutorial/time_series/ts_classification/tmp.py b/examples/tutorial/time_series/ts_classification/tmp.py index 4b32134e0..a0c253bc1 100644 --- a/examples/tutorial/time_series/ts_classification/tmp.py +++ b/examples/tutorial/time_series/ts_classification/tmp.py @@ -78,6 +78,7 @@ def plot_mean_sample_multi(X, y, labels: list = [], n_channel: int = None): metric='accuracy', timeout=15, pop_size=20, + backend='dask', with_tunig=False, n_jobs=-1, logging_level=20) diff --git a/fedot_ind/api/main.py b/fedot_ind/api/main.py index 4f35773b7..235591e5b 100644 --- a/fedot_ind/api/main.py +++ b/fedot_ind/api/main.py @@ -74,7 +74,7 @@ def __init_solver(self): self.repo = IndustrialModels().setup_default_repository() self.config_dict['optimizer'] = None else: - self.repo = IndustrialModels().setup_repository() + self.repo = IndustrialModels().setup_repository(backend=self.api_controller.backend_method) self.logger.info(f'-------------------------------------------------') self.logger.info('Initialising Dask Server') self.config_dict['initial_assumption'] = self.config_dict['initial_assumption'].build() @@ -83,8 +83,6 @@ def __init_solver(self): self.logger.info(f'-------------------------------------------------') self.logger.info('Initialising solver') self.solver = Fedot(**self.config_dict) - # if self.api_controller.is_default_fedot_context: - # self.solver = self.api_controller._check_mutations(self.solver) def _process_input_data(self, input_data): train_data = deepcopy(input_data) # we do not want to make inplace changes diff --git a/fedot_ind/api/utils/api_init.py b/fedot_ind/api/utils/api_init.py index 4ad5845ff..f5fd5a266 100644 --- a/fedot_ind/api/utils/api_init.py +++ b/fedot_ind/api/utils/api_init.py @@ -2,16 +2,14 @@ from pathlib import Path from fedot.core.repository.tasks import TsForecastingParams -from golem.core.optimisers.adaptive.operator_agent import RandomAgent from pymonad.either import Either from 
fedot_ind.api.utils.industrial_strategy import IndustrialStrategy from fedot_ind.api.utils.path_lib import DEFAULT_PATH_RESULTS as default_path_to_save_results from fedot_ind.core.architecture.preprocessing.data_convertor import ApiConverter -from fedot_ind.core.architecture.settings.computational import BackendMethods from fedot_ind.core.optimizer.IndustrialEvoOptimizer import IndustrialEvoOptimizer from fedot_ind.core.repository.constanst_repository import \ - FEDOT_API_PARAMS, fedot_init_assumptions, FEDOT_MUTATION_STRATEGY + FEDOT_API_PARAMS, fedot_init_assumptions from fedot_ind.core.repository.model_repository import default_industrial_availiable_operation from fedot_ind.tools.explain.explain import PointExplainer, RecurrenceExplainer @@ -118,26 +116,11 @@ def __init_experiment_setup(self): self.logger.info('Initialising experiment setup') industrial_params = set(self.config_dict.keys()) - \ - set(FEDOT_API_PARAMS.keys()) + set(FEDOT_API_PARAMS.keys()) for param in industrial_params: self.config_dict.pop(param, None) - backend_method_current, backend_scipy_current = BackendMethods( - self.backend_method).backend - globals()['backend_methods'] = backend_method_current - globals()['backend_scipy'] = backend_scipy_current - - def _check_mutations(self, solver): - for mutation in solver.api_composer.params.optimizer_params.mutation_types.mutation_types: - try: - is_invalid = mutation.__name__.__contains__('resample') - except Exception: - is_invalid = mutation.name.__contains__('resample') - if is_invalid: - solver.api_composer.params.optimizer_params.mutation_types.mutation_types.remove(mutation) - - solver.api_composer.params.optimizer_params.adaptive_mutation_type = RandomAgent( - actions=solver.api_composer.params.optimizer_params.mutation_types, - probs=FEDOT_MUTATION_STRATEGY[ - 'params_mutation_strategy']) - return solver + # backend_method_current, backend_scipy_current = BackendMethods( + # self.backend_method).backend + # globals()['backend_methods'] = 
backend_method_current + # globals()['backend_scipy'] = backend_scipy_current diff --git a/fedot_ind/core/operation/decomposition/matrix_decomposition/column_sampling_decomposition.py b/fedot_ind/core/operation/decomposition/matrix_decomposition/column_sampling_decomposition.py index ae2055be6..e630933ed 100644 --- a/fedot_ind/core/operation/decomposition/matrix_decomposition/column_sampling_decomposition.py +++ b/fedot_ind/core/operation/decomposition/matrix_decomposition/column_sampling_decomposition.py @@ -5,6 +5,7 @@ from sklearn.random_projection import johnson_lindenstrauss_min_dim from fedot_ind.core.architecture.settings.computational import backend_methods as np +from fedot_ind.core.repository.model_repository import DEFAULT_SVD_SOLVER class CURDecomposition: @@ -59,7 +60,7 @@ def fit_transform(self, feature_tensor: np.ndarray, sampled_tensor = sampled_tensor[self.row_indices, :] else: # evaluate pseudoinverse for W - U^-1 - X, Sigma, y_T = np.linalg.svd(w, full_matrices=False) + X, Sigma, y_T = DEFAULT_SVD_SOLVER(w, full_matrices=False) Sigma_plus = np.linalg.pinv(np.diag(Sigma)) # aprox U using pseudoinverse u = y_T.T @ Sigma_plus @ Sigma_plus @ X.T @@ -135,46 +136,3 @@ def matrix_to_ts(matrix: np.ndarray) -> np.ndarray: ts[i:i + matrix.shape[1]] += matrix[i] return ts - -def get_random_sparse_matrix(size: tuple): - """Generate random sparse matrix with size = size""" - - matrix = np.zeros(size) - for i in range(size[0]): - for j in range(size[1]): - if np.random.rand() < 0.1: - matrix[i, j] = np.random.rand() - return matrix - - -if __name__ == '__main__': - from fedot_ind.tools.loader import DataLoader - - arr = np.array([[1, 1, 1, 0, 0], - [3, 3, 3, 0, 0], - [4, 4, 4, 0, 0], - [5, 5, 5, 0, 0], - [0, 0, 0, 4, 4], - [0, 0, 0, 5, 5], - [0, 0, 0, 2, 2]]) - - (X_train, y_train), (X_test, y_test) = DataLoader('Lightning7').load_data() - - # init_ts = train[0].iloc[0, :].values - # scaler = MinMaxScaler() - # scaler.fit(init_ts.reshape(-1, 1)) - # single_ts 
= scaler.transform(init_ts.reshape(-1, 1)).reshape(-1) - - cur = CURDecomposition(rank=20) - # M = cur.ts_to_matrix(single_ts, 30) - C, U, R = cur.fit_transform(X_train) - basis = cur.reconstruct_basis(C, U, R, X_train.shape[1]) - - # rec_ts = cur.matrix_to_ts(C @ U @ R) - # err = np.linalg.norm(single_ts - rec_ts) - - # plt.plot(init_ts, label='init_ts') - # plt.plot(scaler.inverse_transform(rec_ts.reshape(-1, 1)), label='rec_ts') - # plt.legend() - # plt.show() - _ = 1 diff --git a/fedot_ind/core/operation/decomposition/matrix_decomposition/dmd_decomposition.py b/fedot_ind/core/operation/decomposition/matrix_decomposition/dmd_decomposition.py index 27af8bb8f..a55903634 100644 --- a/fedot_ind/core/operation/decomposition/matrix_decomposition/dmd_decomposition.py +++ b/fedot_ind/core/operation/decomposition/matrix_decomposition/dmd_decomposition.py @@ -1,10 +1,9 @@ from fedot_ind.core.architecture.settings.computational import backend_methods as np -from numpy.linalg import svd - +from fedot_ind.core.repository.model_repository import DEFAULT_SVD_SOLVER, DEFAULT_QR_SOLVER def rq(A): n, m = A.shape - Q, R = np.linalg.qr(np.flipud(A).T, mode='complete') + Q, R = DEFAULT_QR_SOLVER(np.flipud(A).T, mode='complete') R = np.rot90(R.T, 2) Q = np.flipud(Q.T) if n > m: @@ -18,7 +17,7 @@ def tls(A, B): if A.shape[0] != B.shape[0]: raise ValueError('Matrices are not conformant.') R1 = np.hstack((A, B)) - U, S, V = np.linalg.svd(R1) + U, S, V = DEFAULT_SVD_SOLVER(R1) r = B.shape[1] R, Q = rq(V[:, r:]) Gamma = R[n:, n - r:] @@ -28,7 +27,7 @@ def tls(A, B): def exact_dmd_decompose(X, Y, rank): - Ux, Sx, Vx = svd(X) + Ux, Sx, Vx = DEFAULT_SVD_SOLVER(X) Ux = Ux[:, :rank] Sx = Sx[:rank] Sx = np.diag(Sx) @@ -46,14 +45,14 @@ def A(v): return np.dot(a=Ux, b=np.dot(a=Atilde, b=np.dot(a=Ux.T, b=v))) def orthogonal_dmd_decompose(X, Y, rank): - Ux, _, _ = svd(X) + Ux, _, _ = DEFAULT_SVD_SOLVER(X) Ux = Ux[:, :rank] # Project X (current state) and Y (future state) on leading components of X 
Yproj = Ux.T @ Y Xproj = Ux.T @ X # A_proj is constrained to be a unitary matrix and the minimization problem is argmin (A.T @ A = I) |Y-AX|_frob # The solution of A_proj is obtained by Schonemann A = Uyx,@ Vyx.T - Uyx, _, Vyx = svd(Yproj @ Xproj.T) + Uyx, _, Vyx = DEFAULT_SVD_SOLVER(Yproj @ Xproj.T) Aproj = Uyx @ Vyx.T def A(x): return np.dot(a=Ux, b=np.dot(a=Aproj, b=np.dot(a=Ux.T, b=x))) # Diagonalise unitary operator @@ -65,7 +64,7 @@ def A(x): return np.dot(a=Ux, b=np.dot(a=Aproj, b=np.dot(a=Ux.T, b=x))) def symmetric_decompose(X, Y, rank): - Ux, S, V = np.linalg.svd(X) + Ux, S, V = DEFAULT_SVD_SOLVER(X) C = np.dot(Ux.T, np.dot(Y, V)) C1 = C if rank is None: diff --git a/fedot_ind/core/operation/decomposition/matrix_decomposition/power_iteration_decomposition.py b/fedot_ind/core/operation/decomposition/matrix_decomposition/power_iteration_decomposition.py index 1846d3e80..7bb9a94dc 100644 --- a/fedot_ind/core/operation/decomposition/matrix_decomposition/power_iteration_decomposition.py +++ b/fedot_ind/core/operation/decomposition/matrix_decomposition/power_iteration_decomposition.py @@ -7,6 +7,7 @@ from fedot_ind.core.operation.filtration.channel_filtration import _detect_knee_point from fedot_ind.core.operation.transformation.regularization.spectrum import singular_value_hard_threshold, \ sv_to_explained_variance_ratio, eigencorr_matrix +from fedot_ind.core.repository.model_repository import DEFAULT_SVD_SOLVER, DEFAULT_QR_SOLVER class RSVDDecomposition: @@ -81,7 +82,7 @@ def rsvd(self, # thresholding if not approximation: # classic svd decomposition - Ut, St, Vt = np.linalg.svd(tensor, full_matrices=False) + Ut, St, Vt = DEFAULT_SVD_SOLVER(tensor, full_matrices=False) # Compute low rank. low_rank = self._spectrum_regularization(St, reg_type=reg_type) if regularized_rank is not None: @@ -110,14 +111,13 @@ def rsvd(self, AAT, self.poly_deg) @ tensor @ self.random_projection # Fourth step. 
Orthogonalization of the resulting "sampled" matrix # creates for us a basis of eigenvectors. - sampled_tensor_orto, _ = np.linalg.qr( - sampled_tensor, mode='reduced') + sampled_tensor_orto, _ = DEFAULT_QR_SOLVER(sampled_tensor, mode='reduced') # Fifth step. Project initial Gramm matrix on new basis obtained # from "sampled matrix". M = sampled_tensor_orto.T @ AAT @ sampled_tensor_orto # Six step. Classical svd decomposition with choosen type of # spectrum thresholding - Ut, St, Vt = np.linalg.svd(M, full_matrices=False) + Ut, St, Vt = DEFAULT_SVD_SOLVER(M, full_matrices=False) # Compute low rank. low_rank = self._spectrum_regularization(St, reg_type=reg_type) # Seven step. Compute matrix approximation and choose new low_rank @@ -127,6 +127,6 @@ def rsvd(self, # Eight step. Return matrix approximation. reconstr_tensor = self._compute_matrix_approximation( Ut, sampled_tensor_orto, tensor, regularized_rank) - U_, S_, V_ = np.linalg.svd(reconstr_tensor, full_matrices=False) + U_, S_, V_ = DEFAULT_SVD_SOLVER(reconstr_tensor, full_matrices=False) return [U_, S_, V_] diff --git a/fedot_ind/core/repository/constanst_repository.py b/fedot_ind/core/repository/constanst_repository.py index b7dd2859a..c660fb79a 100644 --- a/fedot_ind/core/repository/constanst_repository.py +++ b/fedot_ind/core/repository/constanst_repository.py @@ -1,4 +1,5 @@ import math +import pathlib from enum import Enum from multiprocessing import cpu_count @@ -16,6 +17,7 @@ minkowski from torch import nn +from fedot_ind.api.utils.path_lib import PROJECT_PATH from fedot_ind.core.metrics.metrics_implementation import calculate_classification_metric, calculate_regression_metric, \ calculate_forecasting_metric, calculate_detection_metric from fedot_ind.core.models.nn.network_modules.losses import CenterLoss, CenterPlusLoss, ExpWeightedLoss, FocalLoss, \ @@ -62,7 +64,7 @@ def beta_thr(beta): return 0.56 * np.power(beta, 3) - 0.95 * \ - np.power(beta, 2) + 1.82 * beta + 1.43 + np.power(beta, 2) + 1.82 * 
beta + 1.43 def get_default_industrial_model_params(model_name): @@ -147,6 +149,15 @@ class DataTypeConstant(Enum): TRAJECTORY_MATRIX = HankelMatrix +class PathConstant(Enum): + IND_DATA_OPERATION_PATH = pathlib.Path(PROJECT_PATH, 'fedot_ind', 'core', 'repository', 'data', + 'industrial_data_operation_repository.json') + DEFAULT_DATA_OPERATION_PATH = pathlib.Path('data_operation_repository.json') + IND_MODEL_OPERATION_PATH = pathlib.Path(PROJECT_PATH, 'fedot_ind', 'core', 'repository', 'data', + 'industrial_model_repository.json') + DEFAULT_MODEL_OPERATION_PATH = pathlib.Path('model_repository.json') + + class FeatureConstant(Enum): STAT_METHODS = { 'mean_': np.mean, @@ -781,6 +792,11 @@ class UnitTestConstant(Enum): MATRIX = DataTypeConstant.MATRIX.value TRAJECTORY_MATRIX = DataTypeConstant.TRAJECTORY_MATRIX.value +IND_MODEL_OPERATION_PATH = PathConstant.IND_MODEL_OPERATION_PATH.value +IND_DATA_OPERATION_PATH = PathConstant.IND_DATA_OPERATION_PATH.value +DEFAULT_DATA_OPERATION_PATH = PathConstant.DEFAULT_DATA_OPERATION_PATH.value +DEFAULT_MODEL_OPERATION_PATH = PathConstant.DEFAULT_MODEL_OPERATION_PATH.value + ENERGY_THR = ModelCompressionConstant.ENERGY_THR.value DECOMPOSE_MODE = ModelCompressionConstant.DECOMPOSE_MODE.value FORWARD_MODE = ModelCompressionConstant.FORWARD_MODE.value diff --git a/fedot_ind/core/repository/initializer_industrial_models.py b/fedot_ind/core/repository/initializer_industrial_models.py index 172cb5d27..5a62f4188 100644 --- a/fedot_ind/core/repository/initializer_industrial_models.py +++ b/fedot_ind/core/repository/initializer_industrial_models.py @@ -1,7 +1,4 @@ -import pathlib - import fedot.core.data.data_split as fedot_data_split -from fedot.api.api_utils.api_composer import ApiComposer from fedot.api.api_utils.api_params_repository import ApiParamsRepository from fedot.core.data.merge.data_merger import ImageDataMerger, TSDataMerger from 
fedot.core.operations.evaluation.operation_implementations.data_operations.topological.fast_topological_extractor \ @@ -14,7 +11,9 @@ from fedot.core.pipelines.verification import class_rules from fedot.core.repository.operation_types_repository import OperationTypesRepository -from fedot_ind.api.utils.path_lib import PROJECT_PATH +import fedot_ind.core.repository.model_repository as MODEL_REPO +from fedot_ind.core.repository.constanst_repository import IND_DATA_OPERATION_PATH, IND_MODEL_OPERATION_PATH, \ + DEFAULT_DATA_OPERATION_PATH, DEFAULT_MODEL_OPERATION_PATH from fedot_ind.core.repository.industrial_implementations.abstract import preprocess_industrial_predicts, \ transform_lagged_for_fit_industrial, transform_smoothing_industrial, transform_lagged_industrial, \ merge_industrial_predicts, merge_industrial_targets, build_industrial, postprocess_industrial_predicts, \ @@ -24,6 +23,8 @@ from fedot_ind.core.repository.industrial_implementations.optimisation import _get_default_industrial_mutations from fedot_ind.core.repository.industrial_implementations.optimisation import \ has_no_data_flow_conflicts_in_industrial_pipeline +from fedot_ind.core.repository.model_repository import SKLEARN_REG_MODELS, SKLEARN_CLF_MODELS, FEDOT_PREPROC_MODEL +from fedot_ind.core.repository.model_repository import overload_model_implementation from fedot_ind.core.tuning.search_space import get_industrial_search_space FEDOT_METHOD_TO_REPLACE = [(PipelineSearchSpace, "get_parameters_dict"), @@ -66,43 +67,33 @@ transform_lagged_for_fit_industrial, _check_and_correct_window_size_industrial, transform_smoothing_industrial] + DEFAULT_METHODS = [getattr(class_impl[0], class_impl[1]) for class_impl in FEDOT_METHOD_TO_REPLACE] +DEFAULT_MODELS_TO_REPLACE = [(MODEL_REPO, 'SKLEARN_REG_MODELS'), + (MODEL_REPO, 'SKLEARN_CLF_MODELS'), + (MODEL_REPO, 'FEDOT_PREPROC_MODEL')] class IndustrialModels: def __init__(self): - self.industrial_data_operation_path = pathlib.Path( - PROJECT_PATH, - 
'fedot_ind', - 'core', - 'repository', - 'data', - 'industrial_data_operation_repository.json') - - self.base_data_operation_path = pathlib.Path( - 'data_operation_repository.json') - - self.industrial_model_path = pathlib.Path( - PROJECT_PATH, - 'fedot_ind', - 'core', - 'repository', - 'data', - 'industrial_model_repository.json') - - self.base_model_path = pathlib.Path('model_repository.json') + self.industrial_data_operation_path = IND_DATA_OPERATION_PATH + self.base_data_operation_path = DEFAULT_DATA_OPERATION_PATH + self.industrial_model_path = IND_MODEL_OPERATION_PATH + self.base_model_path = DEFAULT_MODEL_OPERATION_PATH - def _replace_operation(self, to_industrial=True): - if to_industrial: - method = INDUSTRIAL_REPLACE_METHODS - else: - method = DEFAULT_METHODS + def _replace_operation(self, to_industrial=True, backend: str = 'default'): + method = INDUSTRIAL_REPLACE_METHODS if to_industrial else DEFAULT_METHODS for class_impl, method_to_replace in zip(FEDOT_METHOD_TO_REPLACE, method): setattr(class_impl[0], class_impl[1], method_to_replace) + if backend.__contains__('dask'): + model_to_overload = [SKLEARN_REG_MODELS, SKLEARN_CLF_MODELS, FEDOT_PREPROC_MODEL] + overloaded_model = overload_model_implementation(model_to_overload, backend=backend) + for model_impl, new_backend_impl in zip(DEFAULT_MODELS_TO_REPLACE, overloaded_model): + setattr(model_impl[0], model_impl[1], new_backend_impl) - def setup_repository(self): + def setup_repository(self, backend: str = 'default'): OperationTypesRepository.__repository_dict__.update( {'data_operation': {'file': self.industrial_data_operation_path, 'initialized_repo': True, @@ -118,12 +109,12 @@ def setup_repository(self): OperationTypesRepository.assign_repo( 'model', self.industrial_model_path) # replace mutations - self._replace_operation(to_industrial=True) + self._replace_operation(to_industrial=True, backend=backend) class_rules.append(has_no_data_flow_conflicts_in_industrial_pipeline) return
OperationTypesRepository - def setup_default_repository(self): + def setup_default_repository(self, backend: str = 'default'): """ Switching to fedot models. """ @@ -140,47 +131,6 @@ def setup_default_repository(self): 'initialized_repo': None, 'default_tags': []}}) OperationTypesRepository.assign_repo('model', self.base_model_path) - self._replace_operation(to_industrial=False) + self._replace_operation(to_industrial=False, backend=backend) return OperationTypesRepository - def __enter__(self): - """ - Switching to industrial models - """ - OperationTypesRepository.__repository_dict__.update( - {'data_operation': {'file': self.industrial_data_operation_path, - 'initialized_repo': True, - 'default_tags': []}}) - - OperationTypesRepository.assign_repo( - 'data_operation', self.industrial_data_operation_path) - - OperationTypesRepository.__repository_dict__.update( - {'model': {'file': self.industrial_model_path, - 'initialized_repo': True, - 'default_tags': []}}) - OperationTypesRepository.assign_repo( - 'model', self.industrial_model_path) - - setattr(PipelineSearchSpace, "get_parameters_dict", - get_industrial_search_space) - setattr(ApiComposer, "_get_default_mutations", - _get_default_industrial_mutations) - - def __exit__(self, exc_type, exc_val, exc_tb): - """ - Switching to fedot models. 
- """ - OperationTypesRepository.__repository_dict__.update( - {'data_operation': {'file': self.base_data_operation_path, - 'initialized_repo': None, - 'default_tags': [ - OperationTypesRepository.DEFAULT_DATA_OPERATION_TAGS]}}) - OperationTypesRepository.assign_repo( - 'data_operation', self.base_data_operation_path) - - OperationTypesRepository.__repository_dict__.update( - {'model': {'file': self.base_model_path, - 'initialized_repo': None, - 'default_tags': []}}) - OperationTypesRepository.assign_repo('model', self.base_model_path) diff --git a/fedot_ind/core/repository/model_repository.py b/fedot_ind/core/repository/model_repository.py index 3e9dcbf99..3e11e739d 100644 --- a/fedot_ind/core/repository/model_repository.py +++ b/fedot_ind/core/repository/model_repository.py @@ -2,6 +2,7 @@ from itertools import chain from dask_ml.decomposition import PCA as DaskKernelPCA +from dask_ml.decomposition import TruncatedSVD as DaskSVD from dask_ml.linear_model import LogisticRegression as DaskLogReg, LinearRegression as DaskLinReg from fedot.core.operations.evaluation.operation_implementations.data_operations.decompose import \ DecomposerClassImplementation @@ -200,9 +201,14 @@ class AtomizedModel(Enum): DASK_MODELS = {'logit': DaskLogReg, 'kernel_pca': DaskKernelPCA, - 'ridge': DaskLinReg, + 'ridge': DaskLinReg } + SOLVER_MODELS = {'np_svd_solver': np.linalg.svd, + 'np_qr_solver': np.linalg.qr, + 'dask_svd_solver': DaskSVD + } + def default_industrial_availiable_operation(problem: str = 'regression'): operation_dict = {'regression': SKLEARN_REG_MODELS.keys(), @@ -241,26 +247,32 @@ def default_industrial_availiable_operation(problem: str = 'regression'): return operations -def overload_model_implementation(list_of_model): +def overload_model_implementation(list_of_model, backend: str = 'default'): overload_list = [] for model_dict in list_of_model: for model_impl in model_dict.keys(): - if model_impl in DASK_MODELS.keys() and USE_DASK_MODEL_BACKEND: + if model_impl in 
DASK_MODELS.keys() and backend.__contains__('dask'): model_dict[model_impl] = DASK_MODELS[model_impl] overload_list.append(model_dict) return overload_list +MODELS_WITH_DASK_ALTERNATIVE = [ + AtomizedModel.FEDOT_PREPROC_MODEL.value, + AtomizedModel.SKLEARN_CLF_MODELS.value, + AtomizedModel.SKLEARN_REG_MODELS.value +] +DASK_MODELS = AtomizedModel.DASK_MODELS.value +SKLEARN_REG_MODELS, SKLEARN_CLF_MODELS, FEDOT_PREPROC_MODEL = overload_model_implementation( + MODELS_WITH_DASK_ALTERNATIVE) INDUSTRIAL_PREPROC_MODEL = AtomizedModel.INDUSTRIAL_PREPROC_MODEL.value INDUSTRIAL_CLF_PREPROC_MODEL = AtomizedModel.INDUSTRIAL_CLF_PREPROC_MODEL.value -FEDOT_PREPROC_MODEL = AtomizedModel.FEDOT_PREPROC_MODEL.value -SKLEARN_CLF_MODELS = AtomizedModel.SKLEARN_CLF_MODELS.value ANOMALY_DETECTION_MODELS = AtomizedModel.ANOMALY_DETECTION_MODELS.value -SKLEARN_REG_MODELS = AtomizedModel.SKLEARN_REG_MODELS.value NEURAL_MODEL = AtomizedModel.NEURAL_MODEL.value FORECASTING_MODELS = AtomizedModel.FORECASTING_MODELS.value FORECASTING_PREPROC = AtomizedModel.FORECASTING_PREPROC.value -DASK_MODELS = AtomizedModel.DASK_MODELS.value -MODELS_WITH_DASK_ALTERNATIVE = [SKLEARN_REG_MODELS, SKLEARN_CLF_MODELS, FEDOT_PREPROC_MODEL] -SKLEARN_REG_MODELS, SKLEARN_CLF_MODELS, FEDOT_PREPROC_MODEL = overload_model_implementation( - MODELS_WITH_DASK_ALTERNATIVE) + +SOLVER_MODELS = AtomizedModel.SOLVER_MODELS.value +DEFAULT_SVD_SOLVER = SOLVER_MODELS['np_svd_solver'] +DEFAULT_QR_SOLVER = SOLVER_MODELS['np_qr_solver'] +DASK_SVD_SOLVER = SOLVER_MODELS['dask_svd_solver'] diff --git a/tests/unit/core/operation/filtration/test_feature_space_reducer.py b/tests/unit/core/operation/filtration/test_feature_space_reducer.py index 5ff4b054a..c3f7a6edb 100644 --- a/tests/unit/core/operation/filtration/test_feature_space_reducer.py +++ b/tests/unit/core/operation/filtration/test_feature_space_reducer.py @@ -37,7 +37,7 @@ def test__drop_correlated_features(): features = get_features(add_stable=True) cls = 
FeatureSpaceReducer() result = cls._drop_correlated_features(corr_threshold=0.99, features=features) - assert result is None + assert result is not None def test__drop_stable_features():