Skip to content

Commit

Permalink
fix lint issues
Browse files Browse the repository at this point in the history
  • Loading branch information
mfaizanse committed Jan 27, 2025
1 parent eaa79e8 commit 5b5e27f
Show file tree
Hide file tree
Showing 4 changed files with 26 additions and 11 deletions.
1 change: 1 addition & 0 deletions src/utils/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ def load_env_from_json() -> None:
logging.error(f"Error loading config from {config_path}: {e}")
raise


# Load the environment variables from the json file.
load_env_from_json()

Expand Down
23 changes: 15 additions & 8 deletions tests/blackbox/src/common/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,23 +42,32 @@ def __init__(self) -> None:
self.test_data_path = config("TEST_DATA_PATH", default="./data")
self.namespace_scoped_test_data_path = f"{self.test_data_path}/namespace-scoped"

self.companion_api_url = config("COMPANION_API_URL", default="http://localhost:8000")
self.companion_api_url = config(
"COMPANION_API_URL", default="http://localhost:8000"
)
self.companion_token = config("COMPANION_TOKEN", default="not-needed")
self.test_cluster_url = config("TEST_CLUSTER_URL")
self.test_cluster_ca_data = config("TEST_CLUSTER_CA_DATA")
self.test_cluster_auth_token = config("TEST_CLUSTER_AUTH_TOKEN")

self.model_name = config("MODEL_NAME", default="gpt-4o-mini")
self.iterations = config("ITERATIONS", default=3, cast=int)
self.streaming_response_timeout = config("STREAMING_RESPONSE_TIMEOUT", default=600, cast=int) # seconds
self.streaming_response_timeout = config(
"STREAMING_RESPONSE_TIMEOUT", default=600, cast=int
) # seconds
self.max_workers = config("MAX_WORKERS", default=1, cast=int)
self.retry_wait_time = config("RETRY_WAIT_TIME", default=60, cast=int) # seconds
self.retry_max_wait_time = config("RETRY_MAX_WAIT_TIME", default=600, cast=int) # seconds

self.retry_wait_time = config(
"RETRY_WAIT_TIME", default=60, cast=int
) # seconds
self.retry_max_wait_time = config(
"RETRY_MAX_WAIT_TIME", default=600, cast=int
) # seconds

def __load_env_from_json(self) -> None:
"""Load the configuration from the config.json file."""
default_config_path = Path(__file__).parent.parent.parent.parent.parent / "config" / "config.json"
default_config_path = (
Path(__file__).parent.parent.parent.parent.parent / "config" / "config.json"
)
config_path = Path(os.getenv("CONFIG_PATH", default_config_path))

try:
Expand All @@ -85,14 +94,12 @@ def __load_env_from_json(self) -> None:
logging.error(f"Error loading config from {config_path}: {e}")
raise


def get_model_config(self, model_name: str) -> dict:
    """Return the configuration entry whose "name" equals *model_name*.

    Raises:
        ValueError: if no configured model has that name.
    """
    # Scan configured models lazily; stop at the first name match.
    match = next((m for m in self.models if m["name"] == model_name), None)
    if match is None:
        raise ValueError(f"Model {model_name} not found in the configuration.")
    return match


def get_models(self) -> dict:
    """Return all configured model entries as loaded from the config file."""
    models = self.models
    return models
6 changes: 5 additions & 1 deletion tests/blackbox/src/evaluation/validator/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@
def create_validator(config: Config) -> IValidator:
    """Create the validator matching the model configured in *config*.

    Args:
        config: Test configuration providing ``model_name`` and per-model settings.

    Returns:
        A validator instance for the configured model.

    Raises:
        ValueError: if ``config.model_name`` is not a supported model.
    """
    if config.model_name == AIModel.CHATGPT_4_O_MINI:
        model_config = config.get_model_config(config.model_name)
        # Positional order matters: name, temperature, deployment id.
        return ChatOpenAIValidator(
            model_config["name"],
            model_config["temperature"],
            model_config["deployment_id"],
        )

    raise ValueError(f"Unsupported model name: {config.model_name}")
7 changes: 5 additions & 2 deletions tests/blackbox/src/validation/utils/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,9 @@

proxy_client: BaseProxyClient | None = None


def get_gen_ai_proxy_client() -> BaseProxyClient:
    """Return the shared gen-ai-hub proxy client, creating it on first use.

    The client is cached in the module-level ``proxy_client`` so repeated
    calls reuse one instance.
    """
    global proxy_client  # noqa
    if proxy_client is None:
        proxy_client = get_proxy_client("gen-ai-hub")
    return proxy_client
Expand All @@ -39,7 +40,9 @@ class OpenAIModel(Model):
def __init__(self, config: ModelConfig):
    """Create an OpenAI chat model bound to the configured deployment.

    Args:
        config: Model configuration providing ``name`` and ``deployment_id``.
    """
    self._name = config.name
    self.model = ChatOpenAI(
        deployment_id=config.deployment_id,
        proxy_client=get_gen_ai_proxy_client(),
        temperature=0,  # deterministic responses for validation runs
    )

def invoke(self, content: str) -> str:
Expand Down

0 comments on commit 5b5e27f

Please sign in to comment.