From 74c0100883b058cb8d495125e9670a4b67ea4ab8 Mon Sep 17 00:00:00 2001
From: Rohan Mehta
Date: Mon, 7 Aug 2023 20:17:32 -0400
Subject: [PATCH] Fix finetuning batch size

---
 llama_finetuning.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_finetuning.py b/llama_finetuning.py
index ccf8c6845..a405e7bca 100644
--- a/llama_finetuning.py
+++ b/llama_finetuning.py
@@ -180,7 +180,7 @@ def main(**kwargs):
     # Create DataLoaders for the training and validation dataset
     train_dataloader = torch.utils.data.DataLoader(
         dataset_train,
-        batch_size=train_config.batch_size_training,
+        batch_size=train_config.micro_batch_size,
         num_workers=train_config.num_workers_dataloader,
         pin_memory=True,
         sampler=train_sampler if train_sampler else None,
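
Note (not part of the patch above): switching the DataLoader to micro_batch_size matches the usual gradient-accumulation setup, where batch_size_training is the effective batch per optimizer step and each DataLoader batch is one micro-batch. The sketch below illustrates that relationship under those assumptions; gradient_accumulation_steps, model, optimizer, and the loop structure are illustrative names, not identifiers confirmed by this diff.

    # Hypothetical sketch of gradient accumulation; only batch_size_training
    # and micro_batch_size are config fields named in the diff.
    batch_size_training = 64   # effective batch per optimizer step (assumed value)
    micro_batch_size = 4       # per-forward-pass batch that fits in GPU memory (assumed value)
    gradient_accumulation_steps = batch_size_training // micro_batch_size  # 16

    for step, batch in enumerate(train_dataloader):  # each batch holds micro_batch_size samples
        # Scale the loss so gradients summed over the accumulation window
        # average out to one batch_size_training-sized step.
        loss = model(**batch).loss / gradient_accumulation_steps
        loss.backward()
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()

Under this setup, sizing the DataLoader by batch_size_training would make each forward pass as large as the full effective batch, defeating the purpose of accumulation; micro_batch_size is the quantity that must bound per-step memory.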