diff --git a/dlc_developer_config.toml b/dlc_developer_config.toml
index e54e9a26290b..a50584e6e742 100644
--- a/dlc_developer_config.toml
+++ b/dlc_developer_config.toml
@@ -37,7 +37,7 @@ deep_canary_mode = false
 [build]
 # Add in frameworks you would like to build. By default, builds are disabled unless you specify building an image.
 # available frameworks - ["autogluon", "huggingface_tensorflow", "huggingface_pytorch", "huggingface_tensorflow_trcomp", "huggingface_pytorch_trcomp", "pytorch_trcomp", "tensorflow", "mxnet", "pytorch", "stabilityai_pytorch"]
-build_frameworks = []
+build_frameworks = ["autogluon"]
 
 # By default we build both training and inference containers. Set true/false values to determine which to build.
 build_training = true
@@ -107,7 +107,7 @@ use_scheduler = false
 dlc-pr-mxnet-training = ""
 dlc-pr-pytorch-training = ""
 dlc-pr-tensorflow-2-training = ""
-dlc-pr-autogluon-training = ""
+dlc-pr-autogluon-training = "autogluon/training/buildspec.yml"
 
 # HuggingFace Training
 dlc-pr-huggingface-tensorflow-training = ""
@@ -136,7 +136,7 @@ dlc-pr-tensorflow-2-habana-training = ""
 dlc-pr-mxnet-inference = ""
 dlc-pr-pytorch-inference = ""
 dlc-pr-tensorflow-2-inference = ""
-dlc-pr-autogluon-inference = ""
+dlc-pr-autogluon-inference = "autogluon/inference/buildspec.yml"
 
 # Neuron Inference
 dlc-pr-mxnet-neuron-inference = ""
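
Net effect of the three hunks, shown as a minimal TOML sketch of only the keys this patch touches (all surrounding sections and keys keep their defaults): the AutoGluon framework is enabled for building, and the dlc-pr-* buildspec overrides for the AutoGluon training and inference images point at their respective buildspec files.

    [build]
    build_frameworks = ["autogluon"]

    # buildspec overrides for the AutoGluon images
    dlc-pr-autogluon-training = "autogluon/training/buildspec.yml"
    dlc-pr-autogluon-inference = "autogluon/inference/buildspec.yml"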