first functional version (Sourcery refactored) (#27)
Pull Request #26 refactored by [Sourcery](https://sourcery.ai/github/).

Since the original Pull Request was opened from a fork in a contributor's repository, we are unable to create a Pull Request branching from it.

To incorporate these changes, you can either:

1. Merge this Pull Request instead of the original, or

2. Ask your contributor to incorporate these commits locally and push them to the original Pull Request:

   <details>
   <summary>Incorporate changes via command line</summary>
   <pre>
   git fetch https://github.com/eniocc/bdgd-tools pull/26/head
   git merge --ff-only FETCH_HEAD
   git push</pre>
   </details>

**NOTE**: As code is pushed to the original Pull Request, Sourcery will re-run and update (force-push) this Pull Request with new refactorings as necessary. If Sourcery finds no refactorings at any point, this Pull Request will be closed automatically.

See our documentation
[here](https://docs.sourcery.ai/GitHub/Using-Sourcery-for-GitHub/).

<details>
<summary>Run Sourcery locally</summary>
<p>
Shorten the feedback loop during development by using the Sourcery editor plugin:
</p>
<ul>
<li><a href="https://sourcery.ai/download/?editor=vscode">VS Code</a></li>
<li><a href="https://sourcery.ai/download/?editor=pycharm">PyCharm</a></li>
</ul>
</details>

Help us [improve](https://research.typeform.com/to/j06Spdfr?type=fork_refactor&github_login=migueldcga&base_repo=https%3A%2F%2Fgithub.com%2Feniocc%2Fbdgd-tools.git&base_remote_ref=refs%2Fpull%2F26%2Fhead&base_ref=main&base_sha=ba609709f556e2756e8bae3db50baf6422a3ca95&head_repo=https%3A%2F%2Fgithub.com%2Feniocc%2Fbdgd-tools.git&head_ref=sourcery%2Fpull-26&base_pr_number=26&base_pr_state=open) this pull request!
PauloRadatz authored Feb 13, 2024
2 parents 96b7627 + b3a3a81 commit 396cc34
Showing 5 changed files with 44 additions and 46 deletions.
28 changes: 13 additions & 15 deletions bdgd_tools/core/Core.py
@@ -170,35 +170,33 @@ def run_gui(folder_bdgd: str) -> None:
 
 def run(folder: Optional[str] = None, feeder: Optional[str] = None, all_feeders: Optional[bool] = None) -> None:
 
-    if feeder == None:
+    if feeder is None:
         all_feeders = True
 
     s = Sample()
     folder_bdgd = folder or s.mux_energia
     json_file_name = os.path.join(os.getcwd(), "bdgd2dss.json")
 
     json_data = JsonData(json_file_name)
 
     geodataframes = json_data.create_geodataframes(folder_bdgd)
 
     for alimentador in geodataframes["CTMT"]['gdf']['COD_ID'].tolist():
 
         if alimentador == feeder or all_feeders == True:
-            case = Case()
-            list_files_name = []
+            case = Case()
             case.dfs = geodataframes
             case.id = alimentador
-            case.circuitos, aux = Circuit.create_circuit_from_json(json_data.data, case.dfs['CTMT']['gdf'].query("COD_ID==@alimentador"))
-            list_files_name.append(aux)
+            case.circuitos, aux = Circuit.create_circuit_from_json(json_data.data, case.dfs['CTMT']['gdf'].query("COD_ID==@alimentador"))
+            list_files_name = [aux]
             case.line_codes, aux= LineCode.create_linecode_from_json(json_data.data, case.dfs['SEGCON']['gdf'], alimentador)
             list_files_name.append(aux)
 
             for entity in ['SSDMT', 'UNSEMT', 'SSDBT', 'UNSEBT', 'RAMLIG']:
 
                 if not case.dfs[entity]['gdf'].query("CTMT == @alimentador").empty:
                     case.lines_SSDMT, aux = Line.create_line_from_json(json_data.data, case.dfs[entity]['gdf'].query("CTMT==@alimentador"), entity)
                     list_files_name.append(aux)

@@ -210,10 +208,10 @@ def run(folder: Optional[str] = None, feeder: Optional[str] = None, all_feeders
                     list_files_name.append(aux)
                 else:
                     print("No RegControls found for this feeder.\n")
 
             case.transformers, aux= Transformer.create_transformer_from_json(json_data.data, inner_entities_tables(case.dfs['EQTRMT']['gdf'], case.dfs['UNTRMT']['gdf'].query("CTMT==@alimentador"), left_column='UNI_TR_MT', right_column='COD_ID'))
             list_files_name.append(aux)
 
             case.load_shapes, aux = LoadShape.create_loadshape_from_json(json_data.data, case.dfs['CRVCRG']['gdf'], alimentador)
             list_files_name.append(aux)
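Two routine Sourcery patterns are at work in the Core.py hunks above: comparing to `None` with the identity operator, and folding an empty-list initialisation plus first `append` into a list literal. A minimal standalone sketch of both (function and variable names here are illustrative, not from the project):

```python
# Before: equality test against None, and a first append right after
# initialising an empty list.
def collect(feeder=None):
    if feeder == None:  # noqa: E711 — works, but PEP 8 prefers identity
        feeder = "ALL"
    names = []
    names.append(feeder)
    return names

# After: `is None` sidesteps any custom __eq__, and the list literal
# states the initial contents in a single step.
def collect_refactored(feeder=None):
    if feeder is None:
        feeder = "ALL"
    names = [feeder]
    return names

assert collect() == collect_refactored() == ["ALL"]
```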
8 changes: 2 additions & 6 deletions bdgd_tools/model/LineCode.py
@@ -203,14 +203,10 @@ def create_linecode_from_json(json_data: Any, dataframe: gpd.geodataframe.GeoDat
             for i in range(1, interactive['nphases'] + 1):
                 linecode_.nphases = i
                 LineCode.rename_linecode_string(linecode_, i, linecode_.pattern_string())
-            linecodes.append(linecode_)
-
-        else:
-            linecodes.append(linecode_)
+        linecodes.append(linecode_)
 
         progress_bar.set_description(f"Processing Linecode {_ + 1}")
 
     file_name = create_output_file(linecodes, linecode_config["arquivo"], feeder=feeder )
 
     return linecodes, file_name
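The LineCode.py change is Sourcery's "hoist repeated code outside conditional" rewrite: when both branches of an `if`/`else` end with the same statement, that statement can move below the conditional. A self-contained sketch of the pattern (names are illustrative):

```python
def build(interactive, nphases, name="lc"):
    names = []
    # Before: both branches ended with the same append.
    if interactive:
        for i in range(1, nphases + 1):
            name = f"{name}_{i}ph"
        names.append(name)
    else:
        names.append(name)
    return names

def build_refactored(interactive, nphases, name="lc"):
    names = []
    # After: the loop still runs only in the interactive case, and the
    # shared append is written once yet executes on every path.
    if interactive:
        for i in range(1, nphases + 1):
            name = f"{name}_{i}ph"
    names.append(name)
    return names

assert build(True, 2) == build_refactored(True, 2)
assert build(False, 2) == build_refactored(False, 2)
```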
10 changes: 5 additions & 5 deletions bdgd_tools/model/Load.py
@@ -501,15 +501,15 @@ def _create_load_from_row(load_config, row, entity, id):
 
 
         for key, value in load_config.items():
-            if key == "static":
-                load_._process_static(load_, value)
+            if key == "calculated":
+                load_._process_calculated(load_, value, row)
 
             elif key == "direct_mapping":
                 load_._process_direct_mapping(load_, value,row)
             elif key == "indirect_mapping":
                 load_._process_indirect_mapping(load_, value,row)
-            elif key == "calculated":
-                load_._process_calculated(load_, value, row)
+            elif key == "static":
+                load_._process_static(load_, value)
         return load_
 
     @staticmethod
10 changes: 5 additions & 5 deletions bdgd_tools/model/Transformer.py
@@ -430,15 +430,15 @@ def _create_transformer_from_row(transformer_config, row):
         transformer_ = Transformer()
 
         for key, value in transformer_config.items():
-            if key == "static":
-                transformer_._process_static(transformer_, value)
+            if key == "calculated":
+                transformer_._process_calculated(transformer_, value, row)
 
             elif key == "direct_mapping":
                 transformer_._process_direct_mapping(transformer_, value,row)
             elif key == "indirect_mapping":
                 transformer_._process_indirect_mapping(transformer_, value,row)
-            elif key == "calculated":
-                transformer_._process_calculated(transformer_, value, row)
+            elif key == "static":
+                transformer_._process_static(transformer_, value)
         return transformer_
 
     @staticmethod
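The Load.py and Transformer.py hunks are the same rewrite applied twice: the branches of an `if`/`elif` chain are reordered. Because every branch tests the same `key` against a different literal, at most one branch can match, so the order of the mutually exclusive tests does not change which handler runs. A tiny sketch (handler strings are illustrative):

```python
def dispatch(key):
    if key == "static":
        return "static handler"
    elif key == "calculated":
        return "calculated handler"
    return None

def dispatch_reordered(key):
    # Same mutually exclusive tests, opposite order: behaviour is identical.
    if key == "calculated":
        return "calculated handler"
    elif key == "static":
        return "static handler"
    return None

for k in ("static", "calculated", "other"):
    assert dispatch(k) == dispatch_reordered(k)
```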
34 changes: 19 additions & 15 deletions tests/errors_logs_generator.py
@@ -18,15 +18,15 @@ def log_errors_elements2(df_aneel, df_tools,arquivo="log_errors"):
     print(os.getcwd())
     if not os.path.exists("logs_errors"):
         os.mkdir("logs_errors")
-    output_directory = os.path.join(f'logs_errors')
+    output_directory = os.path.join('logs_errors')
     path = os.path.join(output_directory, f'{arquivo}.csv')
 
 
 
     df_aneel = df_aneel.sort_values(by='name')
     df_aneel = df_aneel.reset_index(drop=True)
 
     df_tools = df_tools.sort_values(by='name')
     df_tools = df_tools.reset_index(drop=True)

@@ -35,21 +35,21 @@ def log_errors_elements2(df_aneel, df_tools,arquivo="log_errors"):
     # Open a CSV file for writing at the specified path
     with open(path, mode='w', newline='') as file:
         writer = csv.writer(file)
 
         # Write the header row to the CSV file
         writer.writerow(['Coluna', 'Nome', 'Valor Aneel', 'Valor Df'])
 
         for col_A in df_aneel.columns:
             if col_A == 'name':
                 continue
 
             comparison_result = merged_df[f'{col_A}_df1'] == merged_df[f'{col_A}_df2']
-            false_elements = comparison_result[comparison_result == False]
+            false_elements = comparison_result[not comparison_result]
 
             for index, value in false_elements.items():
                 # Append the mismatch details to the CSV file
                 writer.writerow([col_A, merged_df.loc[index, 'name'], merged_df.loc[index, f'{col_A}_df1'], merged_df.loc[index, f'{col_A}_df2']])
 
     print(f'Arquivo {arquivo} gerado')
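One caution about the hunk above: `merged_df[...] == merged_df[...]` produces a pandas Series, and applying `not` to a multi-element Series raises `ValueError`, because `not` asks for a single truth value. The element-wise complement is the `~` operator, so `comparison_result[~comparison_result]` is the idiomatic way to keep the rows where the comparison failed. A minimal demonstration with illustrative data:

```python
import pandas as pd

comparison_result = pd.Series([True, False, True])

# `~` negates element-wise: this keeps the positions that compared unequal.
false_elements = comparison_result[~comparison_result]
print(false_elements.index.tolist())  # [1]

# `not` demands one scalar truth value and fails on a multi-element Series.
try:
    comparison_result[not comparison_result]
except ValueError as exc:
    print(exc)  # "The truth value of a Series is ambiguous. ..."
```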


@@ -73,7 +73,7 @@ def remove_middle_number(name):
 
     df_trafos = study.model.transformers_df
     df_loadshape = study.model.loadshapes_df
-    study = py_dss_tools.CreateStudy.power_flow(name="Test", dss_file=str(dss_file))
+    study = py_dss_tools.CreateStudy.power_flow(name="Test", dss_file=dss_file)
     study.dss.text("solve")
     study.view.voltage_profile()
     resultado_nos = study.results.voltage_ln_nodes[0]

@@ -90,7 +90,9 @@ def remove_middle_number(name):
     df_s_aneel = study_aneel.model.summary_df
     resultado_nos_aneel = study_aneel.results.voltage_ln_nodes[0]
 
-    study_aneel = py_dss_tools.CreateStudy.power_flow(name="Test", dss_file=str(dss_file_aneel))
+    study_aneel = py_dss_tools.CreateStudy.power_flow(
+        name="Test", dss_file=dss_file_aneel
+    )
     study.dss.text("solve")
     study_aneel.view.voltage_profile()
@@ -102,15 +104,17 @@ def remove_middle_number(name):
             log_errors_elements2(df_aneel_entidade, df_entidade, arquivo=f'{entidade}_logs_errors')
         except KeyError:
             print("there is no {entidade} element found")  # Handle the KeyError exception here if needed
-            pass
 
-    log_errors_elements2(df_aneel_trafos, df_trafos, arquivo=f'trafos_logs_errors')
+    log_errors_elements2(df_aneel_trafos, df_trafos, arquivo='trafos_logs_errors')
 
-    log_errors_elements2(df_aneel_loadshapes, df_loadshape, arquivo=f'loadshape_logs_errors')
+    log_errors_elements2(
+        df_aneel_loadshapes, df_loadshape, arquivo='loadshape_logs_errors'
+    )
 
-    log_errors_elements2(df_aneel_linecodes, df_linecodes, arquivo=f'linecodes_logs_errors')
+    log_errors_elements2(
+        df_aneel_linecodes, df_linecodes, arquivo='linecodes_logs_errors'
+    )
 
     df_loads['name'] = df_loads['name'].apply(remove_middle_number)
 
-    log_errors_elements2(df_aneel_loads, df_loads, arquivo=f'loads_logs_errors')
+    log_errors_elements2(df_aneel_loads, df_loads, arquivo='loads_logs_errors')
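The remaining rewrites in this file drop the `f` prefix from literals that contain no `{...}` placeholders; such an f-string is just a plain string with extra noise. (The unchanged context line `print("there is no {entidade} element found")` has the opposite problem: it needs an `f` prefix for `{entidade}` to interpolate.) A sketch of the pattern:

```python
entidade = 'loads'

arquivo = f'trafos_logs_errors'   # nothing to interpolate: the f is inert
assert arquivo == 'trafos_logs_errors'

arquivo = f'{entidade}_logs_errors'  # here the prefix does real work
assert arquivo == 'loads_logs_errors'
```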
