From c3936510d134b502819af3af62f1f3d5ea897b21 Mon Sep 17 00:00:00 2001 From: Farzad Abdolhosseini Date: Tue, 8 Oct 2024 13:19:15 -0700 Subject: [PATCH] [bugfix] Missing enable_fsdp in 70b config --- ultravox/training/configs/llama_70b.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ultravox/training/configs/llama_70b.yaml b/ultravox/training/configs/llama_70b.yaml index 4e3a8432..1be46ef0 100644 --- a/ultravox/training/configs/llama_70b.yaml +++ b/ultravox/training/configs/llama_70b.yaml @@ -8,6 +8,9 @@ exp_name: "ultravox-v0_4-llama3.1-70B-whisper_m" text_model: "meta-llama/Meta-Llama-3.1-70B-Instruct" audio_model: "openai/whisper-medium" +# We need to shard the model in order to fit in GPU memory +enable_fsdp: true + batch_size: 5 # We increase the number of steps by 2x, but with a lower batch_size, we still won't be training on as many samples as the 8B model # We would revisit this later on when