diff --git a/README.md b/README.md
index 00dd863..5186475 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ pip install -r requirements.txt
 AG-A supports using both AWS Bedrock and OpenAI as LLM model providers. You will need to set up API keys for the respective provider you choose. By default, AG-A uses AWS Bedrock for its language models.
 
 #### AWS Bedrock Setup
-AG-A integrates with AWS Bedrock by default. To use AWS Bedrock, you will need to configure your AWS credentials and region settings:
+To use AWS Bedrock, you will need to configure your AWS credentials and region settings:
 
 ```bash
 export AWS_DEFAULT_REGION=""
@@ -48,11 +48,11 @@ export AWS_ACCESS_KEY_ID=""
 export AWS_SECRET_ACCESS_KEY=""
 ```
 
-Ensure you have an active AWS account and appropriate permissions set up for using Bedrock models. You can manage your AWS credentials through the AWS Management Console. See [Bedrock supported AWS regions](https://docs.aws.amazon.com/bedrock/latest/userguide/bedrock-regions.html)
+Ensure you have an active AWS account and appropriate permissions set up for using Bedrock models. You can manage your AWS credentials through the AWS Management Console. See [Bedrock supported AWS regions](https://docs.aws.amazon.com/bedrock/latest/userguide/models-regions.html).
 
 #### OpenAI Setup
 
-To use OpenAI, you'll need to set your OpenAI API key as an environment variable:
+To use OpenAI, you will need to set your OpenAI API key as an environment variable:
 
 ```bash
 export OPENAI_API_KEY="sk-..."
diff --git a/src/autogluon/assistant/llm/llm.py b/src/autogluon/assistant/llm/llm.py
index f35b80f..3cc2f76 100644
--- a/src/autogluon/assistant/llm/llm.py
+++ b/src/autogluon/assistant/llm/llm.py
@@ -115,8 +115,7 @@ def get_openai_models() -> List[str]:
     @staticmethod
     def get_bedrock_models() -> List[str]:
         try:
-            # TODO: Remove hardcoding AWS region
-            bedrock = boto3.client("bedrock", region_name="us-west-2")
+            bedrock = boto3.client("bedrock")
             response = bedrock.list_foundation_models()
             return [model["modelId"] for model in response["modelSummaries"]]
         except Exception as e:
@@ -166,8 +165,6 @@ def _get_bedrock_chat_model(config: DictConfig) -> AssistantChatBedrock:
                 "temperature": config.temperature,
                 "max_tokens": config.max_tokens,
             },
-            # TODO: Remove hardcoding AWS region
-            region_name="us-west-2",
             verbose=config.verbose,
         )
 
@@ -177,9 +174,11 @@ def get_chat_model(cls, config: DictConfig) -> Union[AssistantChatOpenAI, Assist
         assert config.provider in valid_providers, f"{config.provider} is not a valid provider in: {valid_providers}"
 
         valid_models = cls.get_valid_models(config.provider)
-        assert (
-            config.model in valid_models
-        ), f"{config.model} is not a valid model in: {valid_models} for provider {config.provider}"
+        assert config.model in valid_models, (
+            f"{config.model} is not a valid model in: {valid_models} for provider {config.provider}. "
+            "Please check if the requested model is available in "
+            f"`AWS_DEFAULT_REGION={os.environ.get('AWS_DEFAULT_REGION')}`."
+        )
 
         if config.model not in WHITE_LIST_LLM:
             logger.warning(f"{config.model} is not on the white list. Our white list models include {WHITE_LIST_LLM}")
diff --git a/src/autogluon/assistant/transformer/feature_transformers/caafe.py b/src/autogluon/assistant/transformer/feature_transformers/caafe.py
index e7638ef..587a955 100644
--- a/src/autogluon/assistant/transformer/feature_transformers/caafe.py
+++ b/src/autogluon/assistant/transformer/feature_transformers/caafe.py
@@ -29,8 +29,7 @@ def __init__(
         num_iterations: int = 2,
         optimization_metric: str = "roc",
         eval_model: str = "lightgbm",
-        # TODO: Remove hardcoding AWS region
-        region_name: str = "us-west-2",
+        region_name: str = os.environ.get("AWS_DEFAULT_REGION", "us-west-2"),
         **kwargs,
     ) -> None:
         # Set up credentials if using OpenAI
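
Note: the diff above drops every hardcoded `us-west-2` in favor of boto3's standard region resolution. As a minimal sketch of the behavior the new assertion message relies on (not part of the diff; the `list_available_bedrock_models` helper below is hypothetical), with no explicit `region_name` boto3 resolves the region from `AWS_REGION`/`AWS_DEFAULT_REGION` or `~/.aws/config`, and `list_foundation_models()` only returns models offered in that region:

```python
import os

import boto3

# Hypothetical helper, not part of the PR: demonstrates the region fallback
# chain the diff now relies on instead of a hardcoded "us-west-2".
def list_available_bedrock_models() -> list:
    # Mirror the default added in caafe.py; with no region_name argument,
    # boto3 itself falls back to AWS_REGION / AWS_DEFAULT_REGION, then
    # the region configured in ~/.aws/config.
    region = os.environ.get("AWS_DEFAULT_REGION", "us-west-2")
    bedrock = boto3.client("bedrock", region_name=region)
    # Only models offered in the resolved region are returned, which is why
    # the new assertion message surfaces AWS_DEFAULT_REGION on failure.
    response = bedrock.list_foundation_models()
    return [model["modelId"] for model in response["modelSummaries"]]


if __name__ == "__main__":
    print(list_available_bedrock_models())
```

If no region is set by any of those sources, `boto3.client("bedrock")` raises `botocore.exceptions.NoRegionError`, which the existing `except Exception` branch in `get_bedrock_models` will catch.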