
Commit

fix: formatting
[email protected] committed Aug 15, 2023
1 parent 18082f6 commit 241994e
Showing 9 changed files with 104 additions and 142 deletions.
9 changes: 4 additions & 5 deletions perun/backend/nvml.py
@@ -66,10 +66,11 @@ def func() -> np.number:
return np.uint32(pynvml.nvmlDeviceGetPowerUsage(handle))

return func

def getUsedMemCallback(handle) -> Callable[[], np.number]:
def func() -> np.number:
return np.uint64(pynvml.nvmlDeviceGetMemoryInfo(handle).used)

return func

devices = []
@@ -125,12 +126,10 @@ def func() -> np.number:
device_type,
device_metadata,
data_type,
getUsedMemCallback(handle)
getUsedMemCallback(handle),
)
)




except NVMLError as e:
log.warning(f"Could not find device {deviceId}")
log.warning(e)
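
For context, the callbacks in this backend wrap pynvml queries into zero-argument samplers. A minimal sketch of how such a callback could be built and polled outside of perun (device index 0 and the surrounding init/shutdown calls are assumptions, not part of this diff):

    import numpy as np
    import pynvml

    def getUsedMemCallback(handle):
        # Zero-argument sampler returning used GPU memory in bytes.
        def func() -> np.number:
            return np.uint64(pynvml.nvmlDeviceGetMemoryInfo(handle).used)
        return func

    pynvml.nvmlInit()
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)  # assumed: first visible GPU
    readMem = getUsedMemCallback(handle)
    print(readMem())  # used memory in bytes at the time of the call
    pynvml.nvmlShutdown()
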
26 changes: 11 additions & 15 deletions perun/comm.py
@@ -24,31 +24,27 @@ def __init__(self):
log.warning(e)

def Get_rank(self) -> int:
"""Return MPI rank.
"""Get local MPI rank.
Returns:
int: MPI Rank
Returns
-------
int
MPI Rank
"""
return self._comm.Get_rank() if self._enabled else self._rank

def Get_size(self) -> int:
"""Return MPI world size.
"""MPI World size.
Returns:
int: MPI world size
Returns
-------
int
World Size
"""
return self._comm.Get_size() if self._enabled else self._size

def gather(self, obj: Any, root: int = 0) -> Optional[List[Any]]:
"""MPI gather operation at selected rank.
Args:
obj (Any): Object to be gathered.
root (int, optional): Rank to gather information at. Defaults to 0.
Returns:
Optional[List[Any]]: List with objects from all the ranks.
"""
"""MPI Gather operation."""
return self._comm.gather(obj, root=root) if self._enabled else [obj]

def allgather(self, obj: Any) -> List[Any]:
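
The Comm wrapper above degrades gracefully when MPI is not available: Get_rank and Get_size fall back to stored defaults and gather returns the local object wrapped in a list. A hypothetical usage sketch (not part of this diff):

    from perun.comm import Comm

    comm = Comm()
    rank = comm.Get_rank()                # 0 in a non-MPI run
    gathered = comm.gather(rank, root=0)  # [0] without MPI; one entry per rank on root with MPI
    if rank == 0:
        print(f"world size {comm.Get_size()}, gathered ranks {gathered}")
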
56 changes: 37 additions & 19 deletions perun/coordination.py
@@ -10,11 +10,17 @@
def getHostRankDict(comm: Comm, hostname: str) -> Dict[str, List[int]]:
"""Return a dictionary with all the host names with each MPI rank in them.
Args:
comm (Comm): MPI Communicator
Returns:
Dict[str, List[int]]: Dictionary with key hostnames and mpi ranks as values.
Parameters
----------
comm : Comm
MPI Communicator
hostname : str
Local rank Hostname
Returns
-------
Dict[str, List[int]]
Global host and mpi ranks dictionary.
"""
rank = comm.Get_rank()

@@ -34,12 +40,19 @@ def getGlobalSensorRankConfiguration(
) -> List[Dict[str, Set[str]]]:
"""Gather available sensor information from every MPI rank and assign/unassign sensors to each rank to avoid over sampling.
Args:
comm (Comm): MPI Communicator
backends (List[Backend]): List of available backends in the current rank.
Returns:
Tuple[Dict[str, List[int]], List[Dict[str, Set[str]]]]: Global rank and sensor configuration objects.
Parameters
----------
comm : Comm
MPI Communicator
backends : Dict[str, Backend]
Backend dictionary
globalHostRanks : Dict[str, List[int]]
Mapping from host to MPI ranks
Returns
-------
List[Dict[str, Set[str]]]
List with appointed backends and sensors for each MPI rank.
"""
visibleSensorsByBackend: Dict[str, Set[str]] = {
backend.name: backend.visibleSensors() for backend in backends.values()
@@ -55,14 +68,19 @@ def getGlobalSensorRankConfiguration(
def assignSensors(
hostBackends: List[Dict[str, Set[str]]], hostNames: Dict[str, List[int]]
) -> List[Dict[str, Set[str]]]:
"""Assign found devices to the lowest rank in each host.
Args:
hostSensors (List[Set[str]]): List with length of the MPI world size, with each index containing the devices of each rank.
hostNames (List[str]): Hostname of the mpi rank at the index.
Returns:
List[Set[str]]: New list with the devices assigned to each rank.
"""Assigns each MPI rank a sensor based on available backends and host-to-rank mapping.
Parameters
----------
hostBackends : List[Dict[str, Set[str]]]
List with global backends
hostNames : Dict[str, List[int]]
Host to MPI Rank mapping
Returns
-------
List[Dict[str, Set[str]]]
List with appointed backends and sensors for each MPI rank.
"""
for host, ranks in hostNames.items():
firstRank = ranks[0]
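
assignSensors keeps the visible sensors only on the lowest rank of each host so that a device is never sampled twice. A small sketch of that idea with made-up hosts and sensors (the real function works on the per-rank backend/sensor dictionaries shown in the diff):

    # Hypothetical inputs: two hosts with two ranks each, identical visible sensors.
    globalHostRanks = {"node01": [0, 1], "node02": [2, 3]}
    globalSensors = [
        {"nvml": {"gpu0"}, "psutil": {"cpu"}},  # rank 0
        {"nvml": {"gpu0"}, "psutil": {"cpu"}},  # rank 1
        {"nvml": {"gpu0"}, "psutil": {"cpu"}},  # rank 2
        {"nvml": {"gpu0"}, "psutil": {"cpu"}},  # rank 3
    ]

    # Keep sensors on the first rank per host, clear them on the rest.
    for host, ranks in globalHostRanks.items():
        for rank in ranks[1:]:
            globalSensors[rank] = {}

    # Ranks 0 and 2 now sample their host's devices; ranks 1 and 3 sample nothing.
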
1 change: 0 additions & 1 deletion perun/extras/__init__.py

This file was deleted.

58 changes: 0 additions & 58 deletions perun/extras/horeka.py

This file was deleted.

28 changes: 18 additions & 10 deletions perun/io/pickle.py
@@ -5,24 +5,32 @@


def exportPickle(dataNode: DataNode) -> bytes:
"""Pickle data node.
"""Export data node to pickle file.
Args:
dataNode (DataNode): DataNode to be pickled.
Parameters
----------
dataNode : DataNode
Data Node
Returns:
bytes: Pickled DataNode
Returns
-------
bytes
Binary data to write to file.
"""
return pickle.dumps(dataNode)


def importPickle(pickleData: bytes) -> DataNode:
"""Unpickle DataNode.
"""Import DataNode from pickled data file.
Args:
pickleData (bytes): Pickled DataNode
Parameters
----------
pickleData : bytes
Binary Data
Returns:
DataNode: Unpickled DataNode
Returns
-------
DataNode
DataNode
"""
return pickle.loads(pickleData)
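
The two helpers above are symmetric, so a DataNode can be written to disk and restored from a plain binary file. A round-trip sketch, assuming the module path follows the file layout and with a placeholder DataNode and filename:

    from perun.io.pickle import exportPickle, importPickle

    dataNode = ...  # an existing perun DataNode produced elsewhere
    blob = exportPickle(dataNode)
    with open("perun_results.pkl", "wb") as f:
        f.write(blob)

    with open("perun_results.pkl", "rb") as f:
        restored = importPickle(f.read())  # equal in content to the original dataNode
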
15 changes: 11 additions & 4 deletions perun/logging.py
@@ -4,11 +4,18 @@
import logging.config


def init_logging(level: str):
"""Initialize default stdout logger.
def init_logging(level: str) -> logging.Logger:
"""Initialize logging object.
Args:
level (str, optional): Desired log level. Defaults to "DEBUG".
Parameters
----------
level : str
Logging level
Returns
-------
Logger
Logger object
"""
logConfig = {
"version": 1,
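
init_logging builds the stdout logger through logging.config and now returns the Logger object. A self-contained sketch of that pattern with the standard library (the formatter string and logger name are assumptions, not the values used in perun):

    import logging
    import logging.config

    def init_logging(level: str) -> logging.Logger:
        logConfig = {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {"plain": {"format": "%(levelname)s:%(name)s: %(message)s"}},
            "handlers": {"console": {"class": "logging.StreamHandler", "formatter": "plain"}},
            "root": {"level": level, "handlers": ["console"]},
        }
        logging.config.dictConfig(logConfig)
        return logging.getLogger("perun")

    log = init_logging("DEBUG")
    log.debug("logger ready")
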
44 changes: 22 additions & 22 deletions perun/processing.py
@@ -114,7 +114,7 @@ def processPowerData(
return energy_J, avg_power_W


def processSensorData( sensorData: DataNode ) -> DataNode:
def processSensorData(sensorData: DataNode) -> DataNode:
"""Calculate metrics based on raw values.
Parameters
@@ -268,7 +268,7 @@ def processSensorData( sensorData: DataNode ) -> DataNode:
)
elif rawData.v_md.unit == Unit.BYTE:
bytes_v = rawData.values

if sensorData.deviceType == DeviceType.NET:
if "READ" in sensorData.id:
metricType = MetricType.NET_READ
@@ -296,12 +296,8 @@ def processSensorData( sensorData: DataNode ) -> DataNode:
result = bytes_v.mean()
aggType = AggregateType.SUM


sensorData.metrics[metricType] = Metric(
metricType,
result.astype(rawData.v_md.dtype),
rawData.v_md,
aggType
metricType, result.astype(rawData.v_md.dtype), rawData.v_md, aggType
)

sensorData.processed = True
@@ -440,38 +436,42 @@ def processRegionsWithSensorData(regions: List[Region], dataNode: DataNode):
events[i * 2 + 1],
)
power[region_idx][rank][i] += power_W
elif measuring_unit == Unit.PERCENT and deviceNode.deviceType == DeviceType.CPU:
elif (
measuring_unit == Unit.PERCENT
and deviceNode.deviceType == DeviceType.CPU
):
_, values = getInterpolatedValues(
raw_data.timesteps.astype(
"float32"
),
raw_data.timesteps.astype("float32"),
raw_data.values,
events[i * 2],
events[i * 2 + 1],
)
cpu_util[region_idx][rank][
i
] += np.mean(values)
elif measuring_unit == Unit.BYTE and deviceNode.deviceType == DeviceType.GPU:
cpu_util[region_idx][rank][i] += np.mean(
values
)
elif (
measuring_unit == Unit.BYTE
and deviceNode.deviceType == DeviceType.GPU
):
_, values = getInterpolatedValues(
raw_data.timesteps.astype(
"float32"
),
raw_data.timesteps.astype("float32"),
raw_data.values,
events[i * 2],
events[i * 2 + 1],
)
gpu_util[region_idx][rank][i] += (np.mean(values) * 100/ raw_data.v_md.max).astype("float32")
gpu_util[region_idx][rank][i] += (
np.mean(values)
* 100
/ raw_data.v_md.max
).astype("float32")
gpu_count[region_idx][rank][i] += 1



for region_idx, region in enumerate(regions):
r_power = np.array(list(chain(*power[region_idx])))
r_cpu_util = np.array(list(chain(*cpu_util[region_idx])))
r_gpu_util = np.array(list(chain(*gpu_util[region_idx])))
r_gpu_count = np.array(list(chain(*gpu_count[region_idx])))

r_gpu_util /= r_gpu_count

region.cpu_util = Stats(
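
processRegionsWithSensorData interpolates each sensor's raw samples onto a region's enter/exit window, averages them, and for byte-valued GPU sensors converts the mean into a percentage of the device capacity. A stand-alone sketch of that calculation using numpy.interp in place of perun's getInterpolatedValues (all sample values are made up):

    import numpy as np

    timesteps = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32")   # sample times in s
    used_bytes = np.array([1e9, 2e9, 2e9, 3e9, 3e9], dtype="float32")  # GPU memory samples
    mem_capacity = 8e9                                                 # stands in for raw_data.v_md.max

    start, stop = 0.5, 2.5  # region enter/exit events
    window = np.linspace(start, stop, num=16, dtype="float32")
    values = np.interp(window, timesteps, used_bytes)

    gpu_util_percent = (np.mean(values) * 100 / mem_capacity).astype("float32")
    print(gpu_util_percent)  # mean GPU memory utilisation (%) over the region
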
9 changes: 1 addition & 8 deletions pyproject.toml
@@ -51,13 +51,6 @@ optional = true
[tool.poetry.group.mpi.dependencies]
mpi4py = "^3.1"


# [tool.poetry.group.horeka]
# optional = true
#
# [tool.poetry.group.horeka.dependencies]
# influxdb-client = "*"

[tool.semantic_release]
version_variable = [ "perun/__init__.py:__version__" ]
version_toml = ["pyproject.toml:tool.poetry.version"]
@@ -73,7 +66,7 @@ testpaths = ["tests"]

[tool.isort]
skip = ["perun/__init__.py"]
known_third_party = ["click", "cpuinfo", "h5py", "influxdb_client", "numpy", "pandas", "psutil", "pynvml", "pytest"]
known_third_party = ["click", "cpuinfo", "h5py", "numpy", "pandas", "psutil", "pynvml", "pytest"]
profile = "black"

[tool.pydocstyle]
