Skip to content

Commit

Permalink
Merge pull request #266 from sthaha/pyproject-toml
Browse files Browse the repository at this point in the history
chore(project): add pyproject.toml
  • Loading branch information
sthaha authored Jul 3, 2024
2 parents ce97df2 + 8132df5 commit c8db10f
Show file tree
Hide file tree
Showing 4 changed files with 174 additions and 59 deletions.
99 changes: 99 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
# PEP 517/518 build configuration: the project is built with hatchling.
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

# Core package metadata (PEP 621).
# The version is not written here; it is read from src/__about__.py
# (see [tool.hatch.version] below).
[project]
name = "kepler-model-server"
dynamic = ["version"]
description = "kepler model server for serving kepler models"
readme = "README.md"
requires-python = ">= 3.8"
license = "Apache-2.0"
keywords = [
"kepler", "models",
"model-server", "estimator"
]

authors = [
{ name = "Sunyanan Choochotkaew", email = "[email protected]" },
{ name = "Sunil Thaha", email = "[email protected]" },
]

classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
]
# Runtime dependencies are fully pinned (==) for reproducible images.
dependencies = [
"flask==2.1.2",
"Werkzeug==2.2.2",
"protobuf==3.19.4",
"pandas==1.4.4",
"numpy==1.22.4",
"prometheus-client==0.14.1",
"prometheus-api-client==0.5.1",
"joblib==1.2.0",
"pyyaml_env_tag==0.1",
"scipy==1.9.1",
"xgboost==2.0.1",
"scikit-learn==1.1.2",
"py-cpuinfo==9.0.0",
"seaborn==0.12.2",
"psutil==5.9.8",
"pyudev==0.24.1",
]

# Console entry points installed with the wheel:
#   model-server -> server.model_server:run
#   estimator    -> estimate.estimator:run
[project.scripts]
model-server = "server.model_server:run"
estimator = "estimate.estimator:run"

[project.urls]
Documentation = "https://github.com/sustainable-computing-io/kepler-model-server#readme"
Issues = "https://github.com/sustainable-computing-io/kepler-model-server/issues"
Source = "https://github.com/sustainable-computing-io/kepler-model-server"

# Single-source version: hatch reads __version__ from this file.
# NOTE(review): the coverage "omit" below lists src/server/__about__.py
# while the version file is src/__about__.py — confirm which path is real.
[tool.hatch.version]
path = "src/__about__.py"

# Only these two packages are shipped in the wheel.
[tool.hatch.build.targets.wheel]
packages = [
"src/server",
"src/estimate",
]

# Default hatch dev environment (interactive debugging helpers).
[tool.hatch.envs.default]
python = "3.8"
extra-dependencies = [
"ipython",
"ipdb",
]

# Type-checking environment: `hatch run types:check`.
[tool.hatch.envs.types]
extra-dependencies = [
"mypy>=1.0.0",
]
[tool.hatch.envs.types.scripts]
# NOTE(review): src/kepler_model_server does not match the wheel packages
# (src/server, src/estimate) — verify this path exists before relying on it.
check = "mypy --install-types --non-interactive {args:src/kepler_model_server tests}"


# Coverage measurement configuration (branch coverage, parallel runs).
[tool.coverage.run]
source_pkgs = ["server", "tests"]
branch = true
parallel = true
omit = [
"src/server/__about__.py",
]

# Path equivalences so coverage data from installed and in-tree runs merge.
[tool.coverage.paths]
server = ["src/server", "*/kepler-model-server/src"]
tests = ["tests", "*/kepler-model-server/tests"]

[tool.coverage.report]
exclude_lines = [
"no cov",
"if __name__ == .__main__.:",
"if TYPE_CHECKING:",
]

# Very large limit (320) effectively disables line-length enforcement.
[tool.ruff]
line-length = 320
4 changes: 4 additions & 0 deletions src/__about__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# SPDX-FileCopyrightText: 2024-present
#
# SPDX-License-Identifier: Apache-2.0
# Single source of truth for the package version; read by the build backend
# (hatch's [tool.hatch.version] points at this file) — do not import heavy
# modules here.
__version__ = "0.7.7"
47 changes: 27 additions & 20 deletions src/estimate/estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,17 @@
import sys
import pandas as pd

fpath = os.path.join(os.path.dirname(__file__), 'model')
fpath = os.path.join(os.path.dirname(__file__), "model")
sys.path.append(fpath)

util_path = os.path.join(os.path.dirname(__file__), '..', 'util')
util_path = os.path.join(os.path.dirname(__file__), "..", "util")
sys.path.append(util_path)

###############################################
# power request
# power request

class PowerRequest():

class PowerRequest:
def __init__(self, metrics, values, output_type, source, system_features, system_values, trainer_name="", filter=""):
self.trainer_name = trainer_name
self.metrics = metrics
Expand All @@ -25,7 +26,8 @@ def __init__(self, metrics, values, output_type, source, system_features, system
self.datapoint = pd.DataFrame(values, columns=metrics)
data_point_size = len(self.datapoint)
for i in range(len(system_features)):
self.datapoint[system_features[i]] = [system_values[i]]*data_point_size
self.datapoint[system_features[i]] = [system_values[i]] * data_point_size


###############################################
# serve
Expand All @@ -42,20 +44,21 @@ def __init__(self, metrics, values, output_type, source, system_features, system

loaded_model = dict()


def handle_request(data):
try:
power_request = json.loads(data, object_hook = lambda d : PowerRequest(**d))
power_request = json.loads(data, object_hook=lambda d: PowerRequest(**d))
except Exception as e:
msg = 'fail to handle request: {}'.format(e)
msg = "fail to handle request: {}".format(e)
return {"powers": dict(), "msg": msg}

if not is_support_output_type(power_request.output_type):
msg = "output type {} is not supported".format(power_request.output_type)
return {"powers": dict(), "msg": msg}

output_type = ModelOutputType[power_request.output_type]
# TODO: need revisit if get more than one rapl energy source
if power_request.energy_source is None or 'rapl' in power_request.energy_source:
if power_request.energy_source is None or "rapl" in power_request.energy_source:
power_request.energy_source = "intel_rapl"

if output_type.name not in loaded_model:
Expand Down Expand Up @@ -99,6 +102,7 @@ def handle_request(data):
shutil.rmtree(output_path)
return {"powers": powers, "msg": msg}


class EstimatorServer:
def __init__(self, socket_path):
self.socket_path = socket_path
Expand All @@ -119,43 +123,46 @@ def start(self):
pass

def accepted(self, connection):
    """Serve one client connection: read a JSON power request, pass it to
    handle_request, and send the JSON response back.

    The request payload is a single JSON object, so a trailing ``}`` on the
    accumulated data marks the end of the message.
    """
    data = b""
    while True:
        chunk = connection.recv(1024).strip()
        # recv() returns b"" (never None) when the peer closes the
        # connection; the previous `chunk is None` test could never fire and
        # chunk.decode()[-1] raised IndexError on an empty chunk.
        if not chunk:
            break
        data += chunk
        if data.endswith(b"}"):
            break
    decoded_data = data.decode()
    y = handle_request(decoded_data)
    response = json.dumps(y)
    connection.send(response.encode())


def clean_socket():
    """Remove a stale unix-domain socket file left by a previous run."""
    print("clean socket")
    # Unlink directly instead of exists()-then-unlink: avoids the TOCTOU
    # race between the check and the removal, and also covers paths for
    # which os.path.exists() reports False (e.g. a dangling symlink).
    try:
        os.unlink(SERVE_SOCKET)
    except FileNotFoundError:
        pass


def sig_handler(signum, frame) -> None:
    # SIGTERM handler (registered in run()): remove the serving socket so a
    # restarted process does not trip over a stale socket file, then exit
    # with a non-zero status.
    clean_socket()
    sys.exit(1)


import argparse


def run():
    """Entry point for the estimator: parse CLI options, then serve power
    estimation requests over the unix socket until interrupted.

    The socket file is cleaned up both before starting (stale file from a
    previous run) and on the way out (finally / SIGTERM handler).
    """
    set_env_from_model_config()
    clean_socket()
    signal.signal(signal.SIGTERM, sig_handler)
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-e",
            "--err",
            required=False,
            type=str,
            default="mae",
            metavar="<error metric>",
            help="Error metric for determining the model with minimum error value",
        )
        args = parser.parse_args()
        # NOTE(review): this binds a *local* named DEFAULT_ERROR_KEYS; if a
        # module-level DEFAULT_ERROR_KEYS is meant to be configured here,
        # a `global` declaration is missing — confirm against its readers.
        DEFAULT_ERROR_KEYS = args.err.split(",")
        server = EstimatorServer(SERVE_SOCKET)
        server.start()
    finally:
        # Ensure the socket file is removed even if start() raises.
        clean_socket()


if __name__ == "__main__":
    run()
Loading

0 comments on commit c8db10f

Please sign in to comment.