Adding transformer element & RAMLIG and SSDBT entities (#22)
PauloRadatz authored Nov 7, 2023
2 parents ee74cd4 + c79db89 commit 7abff5e
Showing 7 changed files with 600 additions and 35 deletions.
66 changes: 47 additions & 19 deletions bdgd2dss.json
@@ -56,6 +56,9 @@
"npts": 24
},
"direct_mapping": {
"tipocc": "COD_ID",
"tipodia": "TIP_DIA",
"grupotensao": "GRU_TEN"
},
"indirect_mapping": {
},
@@ -179,15 +182,15 @@
"indirect_mapping": {
"phases": [
"FAS_CON",
"TFASCON"
],
"suffix_linecode": [
"FAS_CON",
"TFASCON"
"convert_tfascon_phases"
],
"bus_nodes": [
"FAS_CON",
"convert_tfascon_bus"
],
"suffix_linecode": [
"FAS_CON",
"convert_tfascon_quant_fios"
]
},
"calculated": {
@@ -209,7 +212,6 @@
"bus2": "PAC_2",
"bus3": "PAC_3",
"transformer": "COD_ID",
"ten_lin_se": "TEN_LIN_SE",
"kvas": "POT_NOM",
"tap": "TAP"
},
@@ -230,10 +232,18 @@
"FAS_CON_T",
"convert_tfascon_bus"
],
"kvs": [
"kv1": [
"TEN_PRI",
"convert_tten"
],
"kv2": [
"TEN_SEC",
"convert_tten"
],
"kv3": [
"TEN_TER",
"convert_tten"
],
"windings": [
"TIP_TRAFO",
"convert_ttranf_windings"
@@ -252,16 +262,14 @@
]
},
"calculated": {
"%loadloss": [
"PER_TOT",
"PER_FER",
"POT_NOM",
"calc_loadloss"
"loadloss": [
"(","PER_TOT","-",
"PER_FER",")","/",
"POT_NOM","/1000*100"
],
"%noloadloss": [
"PER_FER",
"POT_NOM",
"calc_noloadloss"
"noloadloss": [
"(","PER_FER","/",
"POT_NOM","/1000)","* 100"
]
}
},
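The token lists above spell out the percentage-loss formulas directly. As a minimal sketch of what they evaluate to, assuming PER_TOT and PER_FER are the transformer's total and no-load (iron) losses in W and POT_NOM is its rating in kVA (the helper name below is illustrative, not part of the repository):

def calc_percent_losses(per_tot: float, per_fer: float, pot_nom: float) -> tuple[float, float]:
    # loadloss   = (PER_TOT - PER_FER) / POT_NOM / 1000 * 100
    loadloss = (per_tot - per_fer) / pot_nom / 1000 * 100
    # noloadloss = (PER_FER / POT_NOM / 1000) * 100
    noloadloss = (per_fer / pot_nom / 1000) * 100
    return loadloss, noloadloss

# Example: 1000 W total losses and 300 W iron losses on a 75 kVA unit
# -> loadloss ≈ 0.933 %, noloadloss = 0.4 %
print(calc_percent_losses(1000.0, 300.0, 75.0))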
@@ -592,14 +600,17 @@
"POT_NOM",
"PER_FER",
"PER_TOT",
"CTMT"
"CTMT",
"TIP_TRAFO"

],
"type": {
"COD_ID": "category",
"FAS_CON_P": "category",
"FAS_CON_S": "category",
"FAS_CON_T": "category",
"CTMT": "category"
"CTMT": "category",
"TIP_TRAFO": "category"
},
"ignore_geometry": "False"
},
@@ -665,7 +676,24 @@
"FAS_CON",
"PAC_1",
"PAC_2",
"TOP_CND",
"TIP_CND",
"COMP",
"CTMT"
],
"type": {
"COD_ID": "category",
"FAS_CON": "category",
"CTMT": "category"
},
"ignore_geometry": "False"
},
"RAMLIG": {
"columns": [
"COD_ID",
"FAS_CON",
"PAC_1",
"PAC_2",
"TIP_CND",
"COMP",
"CTMT"
],
2 changes: 2 additions & 0 deletions bdgd_tools/__init__.py
@@ -8,6 +8,8 @@
from bdgd_tools.model.Circuit import *
from bdgd_tools.model.LineCode import *
from bdgd_tools.model.Line import *
from bdgd_tools.model.LoadShape import *
from bdgd_tools.model.Transformer import *
from bdgd_tools.model.Case import *
from bdgd_tools.core.Core import *
from bdgd_tools.gui.GUI import *
58 changes: 53 additions & 5 deletions bdgd_tools/core/Core.py
@@ -5,9 +5,9 @@
* Date: 22/03/2023
* Time: 12:02
*
* Edited by: eniocc
* Date: 22/03/2023
* Time: 12:02
* Edited by: migueldcga
* Date: 05/11/2023
* Time: 00:24
"""
import inspect
import json
@@ -18,7 +18,7 @@

import geopandas as gpd

from bdgd_tools import Sample, Case, Circuit, LineCode, Line
from bdgd_tools import Sample, Case, Circuit, LineCode, Line, LoadShape, Transformer
from bdgd_tools.core.Utils import load_json
from bdgd_tools.gui.GUI import GUI

@@ -144,6 +144,38 @@ def get_caller_directory(caller_frame: inspect) -> pathlib.Path:
caller_file = inspect.getfile(caller_frame)
return pathlib.Path(caller_file).resolve().parent

def get_caller_directory(caller_frame: inspect) -> pathlib.Path:
"""
Return the directory of the file that called this function.
:param caller_frame: The frame that called this function.
:return: A pathlib.Path object representing the directory of the calling file.
"""
caller_file = inspect.getfile(caller_frame)
return pathlib.Path(caller_file).resolve().parent



def merge_entities_tables(dataframe1: gpd.geodataframe.GeoDataFrame, dataframe2: gpd.geodataframe.GeoDataFrame):
"""
Merge two GeoDataFrames of entities based on their indices and handle duplicated columns.
This is needed when an element requires more than one BDGD table.
Parameters:
dataframe1 (gpd.geodataframe.GeoDataFrame): The first GeoDataFrame (entity table) to be merged.
dataframe2 (gpd.geodataframe.GeoDataFrame): The second GeoDataFrame (entity table) to be merged.
Returns:
gpd.geodataframe.GeoDataFrame: The merged GeoDataFrame with duplicated columns removed.
"""

# Join on the index; overlapping columns from dataframe2 receive the '_left' suffix
merged_dfs = dataframe2.join(dataframe1, lsuffix='_left')
# Drop the suffixed duplicates, keeping the columns from dataframe1
duplicated_columns = [col for col in merged_dfs.columns if '_left' in col]
merged_dfs.drop(columns=duplicated_columns, inplace=True)

return merged_dfs
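A minimal usage sketch for the helper above, with hypothetical toy frames standing in for EQTRMT and UNTRMT (only the join/drop behaviour is illustrated):

import geopandas as gpd

eqtrmt = gpd.GeoDataFrame({"COD_ID": ["T1"], "POT_NOM": [75.0]})
untrmt = gpd.GeoDataFrame({"COD_ID": ["T1"], "PAC_1": ["BUS1"]})

# untrmt's duplicated COD_ID gets the '_left' suffix and is dropped,
# so the result keeps COD_ID and POT_NOM from eqtrmt plus PAC_1 from untrmt.
merged = merge_entities_tables(eqtrmt, untrmt)
print(sorted(merged.columns))  # ['COD_ID', 'PAC_1', 'POT_NOM']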

def run_gui(folder_bdgd: str) -> None:
caller_frame = inspect.currentframe().f_back
@@ -177,6 +209,22 @@ def run(folder: Optional[str] = None) -> None:
for l_ in case.line_codes:
print(l_)

case.lines = Line.create_line_from_json(json_data.data, case.dfs['SSDMT']['gdf'])
case.lines = Line.create_line_from_json(json_data.data, case.dfs['SSDMT']['gdf'], "SSDMT")
case.lines.extend(Line.create_line_from_json(json_data.data, case.dfs['SSDBT']['gdf'], "SSDBT"))
case.lines.extend(Line.create_line_from_json(json_data.data, case.dfs['RAMLIG']['gdf'], "RAMLIG"))


for li_ in case.lines:
print(li_)



case.transformers = Transformer.create_transformer_from_json(json_data.data, merge_entities_tables(case.dfs['EQTRMT']['gdf'], case.dfs['UNTRMT']['gdf']))

for tr_ in case.transformers:
print(tr_)


# case.load_shapes = LoadShape.create_loadshape_from_json(json_data.data, case.dfs['CRVCRG']['gdf'])
# for ls_ in case.load_shapes:
# print(ls_)
26 changes: 25 additions & 1 deletion bdgd_tools/model/Case.py
@@ -10,14 +10,16 @@
* Time: 10:05
"""
from dataclasses import dataclass, field
from bdgd_tools import Circuit, LineCode, Line
from bdgd_tools import Circuit, LineCode, Line, LoadShape, Transformer


@dataclass
class Case:
_circuitos: list[Circuit] = field(init=False)
_line_codes: list[LineCode] = field(init=False)
_lines: list[Line] = field(init=False)
_load_shapes: list[LoadShape] = field(init=False)
_transformers: list[Transformer] = field(init=False)
_dfs: dict = field(init=False)

@property
@@ -44,6 +46,22 @@ def lines(self):
def lines(self, value):
self._lines = value

@property
def load_shapes(self):
return self._load_shapes

@load_shapes.setter
def load_shapes(self, value):
self._load_shapes = value

@property
def transformers(self):
return self._transformers

@transformers.setter
def transformers(self, value):
self._transformers = value

@property
def dfs(self):
return self._dfs
@@ -61,3 +79,9 @@ def line_code_names(self):

def line_name(self):
return [l.line for l in self.lines]

def load_shape_names(self):
return [ls.load_shape for ls in self.load_shapes]

def transformers_names(self):
return [tr.transformer for tr in self.transformers]
44 changes: 42 additions & 2 deletions bdgd_tools/model/Converter.py
@@ -11,7 +11,7 @@
"""
import json
import pathlib

import numpy as np

def convert_tten(case):
switch_dict = {
@@ -122,7 +122,7 @@ def convert_tten(case):
return switch_dict.get(case, 'Invalid case')


def process_loadshape(loadshape_list):
def process_loadshape2(loadshape_list):
"""
Process a list of 96 floating point numbers and return a list of 24
floating point numbers. The function computes the mean of every four
@@ -148,6 +148,44 @@ def process_loadshape(loadshape_list):
min_ = min(medias)
return [(x - min_) / (max_ - min_) for x in medias]

def process_loadshape(loadshape_list):
"""
Process a list of 96 floating point numbers and return a list of 24
floating point numbers. The function computes the mean of every four
numbers in the input list and normalizes the resulting list between 0
and 1.
Parameters
----------
loadshape_list : list of float
The input list containing 96 floating point numbers.
Returns
-------
list of float
A list containing 24 floating point numbers, which are the mean of
every four numbers in the input list, normalized between 0 and 1.
"""

medias = [sum(loadshape_list[i:i + 4]) / 4 for i in range(0, len(loadshape_list), 4)]



# Calculate the minimum and maximum values in the array
min_value = min(medias)
max_value = max(medias)

# Check if the range is zero
if max_value - min_value == 0:
# Handle the case when the range is zero (all values are the same)
return [0.5 for _ in medias] # Set all values to 0.5 (midpoint)

else:
# Normalize the hourly means to the range [0, 1]
return list((np.array(medias) - min_value) / (max_value - min_value))



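A quick check of the behaviour above with two illustrative 96-point curves (a flat profile triggers the zero-range branch, a ramp exercises the normalization):

flat = [10.0] * 96
print(process_loadshape(flat))   # 24 values, all 0.5 (zero range)

ramp = [float(i) for i in range(96)]
hourly = process_loadshape(ramp)
print(len(hourly), hourly[0], hourly[-1])   # 24 0.0 1.0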
def convert_tfascon_bus(case):
switch_dict = {
@@ -268,6 +306,8 @@ def convert_tfascon_conn(case):
'B': 'Delta',
'C': 'Delta',
'N': 'Wye',
'0':'',
' ':'',
0: ''
}
return switch_dict.get(case, 'Invalid case')
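Based only on the entries visible above, the two new keys make zero or blank phase codes map to an empty connection string, while unknown codes still fall back to the default:

print(convert_tfascon_conn('N'))    # 'Wye'
print(convert_tfascon_conn('0'))    # ''
print(convert_tfascon_conn(' '))    # ''
print(convert_tfascon_conn('XYZ'))  # 'Invalid case'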

