From b3a3a81e2ca9b6c6b79ee48eb42f815992a68428 Mon Sep 17 00:00:00 2001
From: Sourcery AI <>
Date: Tue, 2 Jan 2024 16:11:54 +0000
Subject: [PATCH] 'Refactored by Sourcery'

---
 bdgd_tools/core/Core.py         | 28 +++++++++++++--------------
 bdgd_tools/model/LineCode.py    |  8 ++------
 bdgd_tools/model/Load.py        | 10 +++++-----
 bdgd_tools/model/Transformer.py | 10 +++++-----
 tests/errors_logs_generator.py  | 34 ++++++++++++++++++---------------
 5 files changed, 44 insertions(+), 46 deletions(-)

diff --git a/bdgd_tools/core/Core.py b/bdgd_tools/core/Core.py
index d15c2b8..f22457b 100644
--- a/bdgd_tools/core/Core.py
+++ b/bdgd_tools/core/Core.py
@@ -170,9 +170,9 @@ def run_gui(folder_bdgd: str) -> None:
 
 def run(folder: Optional[str] = None, feeder: Optional[str] = None, all_feeders: Optional[bool] = None) -> None:
 
-    if feeder == None:
+    if feeder is None:
         all_feeders = True
-    
+
     s = Sample()
     folder_bdgd = folder or s.mux_energia
     json_file_name = os.path.join(os.getcwd(), "bdgd2dss.json")
@@ -180,25 +180,23 @@ def run(folder: Optional[str] = None, feeder: Optional[str] = None, all_feeders
     json_data = JsonData(json_file_name)
     geodataframes = json_data.create_geodataframes(folder_bdgd)
-    
-    
+
+
     for alimentador in geodataframes["CTMT"]['gdf']['COD_ID'].tolist():
-        
+
         if alimentador == feeder or all_feeders == True:
-            case = Case()
-            list_files_name = []
+            case = Case()
             case.dfs = geodataframes
-            
+
             case.id = alimentador
-            
-            case.circuitos, aux = Circuit.create_circuit_from_json(json_data.data, case.dfs['CTMT']['gdf'].query("COD_ID==@alimentador"))
-            list_files_name.append(aux)
-            
+
+            case.circuitos, aux = Circuit.create_circuit_from_json(json_data.data, case.dfs['CTMT']['gdf'].query("COD_ID==@alimentador"))
+            list_files_name = [aux]
             case.line_codes, aux= LineCode.create_linecode_from_json(json_data.data, case.dfs['SEGCON']['gdf'], alimentador)
             list_files_name.append(aux)
 
             for entity in ['SSDMT', 'UNSEMT', 'SSDBT', 'UNSEBT', 'RAMLIG']:
-                
+
                 if not case.dfs[entity]['gdf'].query("CTMT == @alimentador").empty:
                     case.lines_SSDMT, aux = Line.create_line_from_json(json_data.data, case.dfs[entity]['gdf'].query("CTMT==@alimentador"), entity)
                     list_files_name.append(aux)
@@ -210,10 +208,10 @@ def run(folder: Optional[str] = None, feeder: Optional[str] = None, all_feeders
                     list_files_name.append(aux)
                 else:
                     print("No RegControls found for this feeder.\n")
-            
+
             case.transformers, aux= Transformer.create_transformer_from_json(json_data.data, inner_entities_tables(case.dfs['EQTRMT']['gdf'], case.dfs['UNTRMT']['gdf'].query("CTMT==@alimentador"), left_column='UNI_TR_MT', right_column='COD_ID'))
             list_files_name.append(aux)
-            
+
             case.load_shapes, aux = LoadShape.create_loadshape_from_json(json_data.data, case.dfs['CRVCRG']['gdf'], alimentador)
             list_files_name.append(aux)
diff --git a/bdgd_tools/model/LineCode.py b/bdgd_tools/model/LineCode.py
index 9393bea..778a372 100644
--- a/bdgd_tools/model/LineCode.py
+++ b/bdgd_tools/model/LineCode.py
@@ -203,14 +203,10 @@ def create_linecode_from_json(json_data: Any, dataframe: gpd.geodataframe.GeoDat
             for i in range(1, interactive['nphases'] + 1):
                 linecode_.nphases = i
                 LineCode.rename_linecode_string(linecode_, i, linecode_.pattern_string())
-                linecodes.append(linecode_)
-
-        else:
-            linecodes.append(linecode_)
+            linecodes.append(linecode_)
 
         progress_bar.set_description(f"Processing Linecode {_ + 1}")
-    
+
     file_name = create_output_file(linecodes, linecode_config["arquivo"], feeder=feeder )
     return linecodes, file_name
diff --git a/bdgd_tools/model/Load.py b/bdgd_tools/model/Load.py
index 999a081..94b5de1 100644
--- a/bdgd_tools/model/Load.py
+++ b/bdgd_tools/model/Load.py
@@ -501,15 +501,15 @@ def _create_load_from_row(load_config, row, entity, id):
 
         for key, value in load_config.items():
-            if key == "static":
-                load_._process_static(load_, value)
+            if key == "calculated":
+                load_._process_calculated(load_, value, row)
+
             elif key == "direct_mapping":
                 load_._process_direct_mapping(load_, value,row)
             elif key == "indirect_mapping":
                 load_._process_indirect_mapping(load_, value,row)
-            elif key == "calculated":
-                load_._process_calculated(load_, value, row)
-            
+            elif key == "static":
+                load_._process_static(load_, value)
         return load_
 
     @staticmethod
diff --git a/bdgd_tools/model/Transformer.py b/bdgd_tools/model/Transformer.py
index 64a822a..9af9698 100644
--- a/bdgd_tools/model/Transformer.py
+++ b/bdgd_tools/model/Transformer.py
@@ -430,15 +430,15 @@ def _create_transformer_from_row(transformer_config, row):
         transformer_ = Transformer()
 
         for key, value in transformer_config.items():
-            if key == "static":
-                transformer_._process_static(transformer_, value)
+            if key == "calculated":
+                transformer_._process_calculated(transformer_, value, row)
+
             elif key == "direct_mapping":
                 transformer_._process_direct_mapping(transformer_, value,row)
             elif key == "indirect_mapping":
                 transformer_._process_indirect_mapping(transformer_, value,row)
-            elif key == "calculated":
-                transformer_._process_calculated(transformer_, value, row)
-            
+            elif key == "static":
+                transformer_._process_static(transformer_, value)
         return transformer_
 
     @staticmethod
diff --git a/tests/errors_logs_generator.py b/tests/errors_logs_generator.py
index 0b2ae5f..8276967 100644
--- a/tests/errors_logs_generator.py
+++ b/tests/errors_logs_generator.py
@@ -18,15 +18,15 @@ def log_errors_elements2(df_aneel, df_tools,arquivo="log_errors"):
     print(os.getcwd())
     if not os.path.exists("logs_errors"):
        os.mkdir("logs_errors")
-    
-    output_directory = os.path.join(f'logs_errors')
+
+    output_directory = os.path.join('logs_errors')
     path = os.path.join(output_directory, f'{arquivo}.csv')
 
     df_aneel = df_aneel.sort_values(by='name')
     df_aneel = df_aneel.reset_index(drop=True)
-    
+
     df_tools = df_tools.sort_values(by='name')
     df_tools = df_tools.reset_index(drop=True)
@@ -35,21 +35,21 @@
     # Open a CSV file for writing at the specified path
     with open(path, mode='w', newline='') as file:
         writer = csv.writer(file)
-        
+
         # Write the header row to the CSV file
         writer.writerow(['Coluna', 'Nome', 'Valor Aneel', 'Valor Df'])
 
         for col_A in df_aneel.columns:
             if col_A == 'name':
                 continue
-            
+
             comparison_result = merged_df[f'{col_A}_df1'] == merged_df[f'{col_A}_df2']
-            false_elements = comparison_result[comparison_result == False]
+            false_elements = comparison_result[~comparison_result]
 
             for index, value in false_elements.items():
                 # Append the mismatch details to the CSV file
                 writer.writerow([col_A, merged_df.loc[index, 'name'], merged_df.loc[index, f'{col_A}_df1'], merged_df.loc[index, f'{col_A}_df2']])
-    
+
     print(f'Arquivo {arquivo} gerado')
@@ -73,7 +73,7 @@ def remove_middle_number(name):
 df_trafos = study.model.transformers_df
 df_loadshape = study.model.loadshapes_df
 
-study = py_dss_tools.CreateStudy.power_flow(name="Test", dss_file=str(dss_file))
+study = py_dss_tools.CreateStudy.power_flow(name="Test", dss_file=dss_file)
 study.dss.text("solve")
 study.view.voltage_profile()
 resultado_nos = study.results.voltage_ln_nodes[0]
@@ -90,7 +90,9 @@ def remove_middle_number(name):
 df_s_aneel = study_aneel.model.summary_df
 resultado_nos_aneel = study_aneel.results.voltage_ln_nodes[0]
 
-study_aneel = py_dss_tools.CreateStudy.power_flow(name="Test", dss_file=str(dss_file_aneel))
+study_aneel = py_dss_tools.CreateStudy.power_flow(
+    name="Test", dss_file=dss_file_aneel
+)
 study.dss.text("solve")
 study_aneel.view.voltage_profile()
@@ -102,15 +104,17 @@ def remove_middle_number(name):
         log_errors_elements2(df_aneel_entidade, df_entidade, arquivo=f'{entidade}_logs_errors')
     except KeyError:
         print(f"there is no {entidade} element found")  # handle the KeyError exception here if needed
-        pass
-
-log_errors_elements2(df_aneel_trafos, df_trafos, arquivo=f'trafos_logs_errors')
+log_errors_elements2(df_aneel_trafos, df_trafos, arquivo='trafos_logs_errors')
 
-log_errors_elements2(df_aneel_loadshapes, df_loadshape, arquivo=f'loadshape_logs_errors')
+log_errors_elements2(
+    df_aneel_loadshapes, df_loadshape, arquivo='loadshape_logs_errors'
+)
 
-log_errors_elements2(df_aneel_linecodes, df_linecodes, arquivo=f'linecodes_logs_errors')
+log_errors_elements2(
+    df_aneel_linecodes, df_linecodes, arquivo='linecodes_logs_errors'
+)
 
 df_loads['name'] = df_loads['name'].apply(remove_middle_number)
 
-log_errors_elements2(df_aneel_loads, df_loads, arquivo=f'loads_logs_errors')
\ No newline at end of file
+log_errors_elements2(df_aneel_loads, df_loads, arquivo='loads_logs_errors')
\ No newline at end of file
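
A note on the boolean mask in log_errors_elements2 above: applying Python's "not" to a
pandas Series raises "ValueError: The truth value of a Series is ambiguous", because a
Series has no single truth value, so element-wise inversion needs the tilde operator (~).
Below is a minimal runnable sketch of that idiom; the frames df1/df2, the column "kv",
and the sample values are hypothetical stand-ins, not data from the BDGD tables:

    import pandas as pd

    # Hypothetical frames standing in for df_aneel / df_tools.
    df1 = pd.DataFrame({'name': ['a', 'b', 'c'], 'kv': [13.8, 13.8, 34.5]})
    df2 = pd.DataFrame({'name': ['a', 'b', 'c'], 'kv': [13.8, 12.6, 34.5]})

    # Align the two frames on 'name'; overlapping columns get suffixes.
    merged_df = df1.merge(df2, on='name', suffixes=('_df1', '_df2'))

    # Element-wise comparison yields a boolean Series ...
    comparison_result = merged_df['kv_df1'] == merged_df['kv_df2']
    # ... and ~ inverts it element-wise, keeping only the mismatching rows.
    false_elements = comparison_result[~comparison_result]

    # Prints the row for 'b', whose values differ between the two frames.
    print(merged_df.loc[false_elements.index, ['name', 'kv_df1', 'kv_df2']])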