diff --git a/ultravox/training/configs/llama_70b.yaml b/ultravox/training/configs/llama_70b.yaml
index 4e3a8432..1be46ef0 100644
--- a/ultravox/training/configs/llama_70b.yaml
+++ b/ultravox/training/configs/llama_70b.yaml
@@ -8,6 +8,9 @@
 exp_name: "ultravox-v0_4-llama3.1-70B-whisper_m"
 text_model: "meta-llama/Meta-Llama-3.1-70B-Instruct"
 audio_model: "openai/whisper-medium"
+# We need to shard the model in order to fit on the GPU memory
+enable_fsdp: True
+
 batch_size: 5
 # We increase the number of steps by 2x, but with a lower batch_size, we still won't be training on as many samples as the 8B model
 # We would revisit this later on when
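For context on what `enable_fsdp: True` does: flags like this are typically consumed by wrapping the model in PyTorch's FullyShardedDataParallel, which shards parameters, gradients, and optimizer state across ranks so each GPU holds only a fraction of the 70B weights. The sketch below is illustrative only, not ultravox's actual trainer code; the helper name `maybe_wrap_fsdp` and the size-based wrap policy threshold are assumptions.

```python
# Illustrative sketch of consuming an `enable_fsdp` config flag with
# PyTorch FSDP. Assumes the process group is already initialized
# (e.g. launched via torchrun). Not ultravox's actual implementation.
import functools

import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy


def maybe_wrap_fsdp(model: torch.nn.Module, enable_fsdp: bool) -> torch.nn.Module:
    """Shard the model across ranks when enable_fsdp is set."""
    if not enable_fsdp:
        return model
    # Auto-wrap submodules above ~100M parameters (threshold is an
    # assumption) so each rank stores only a shard of the weights,
    # which is what lets a 70B model fit where full replicas would OOM.
    wrap_policy = functools.partial(
        size_based_auto_wrap_policy, min_num_params=int(1e8)
    )
    return FSDP(
        model,
        auto_wrap_policy=wrap_policy,
        device_id=torch.cuda.current_device(),
    )
```

The trade-off is communication overhead: sharded parameters must be all-gathered before each forward/backward pass, which is why sharding is enabled for the 70B config but not strictly needed for the 8B model referenced in the comments.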