train_chat.sh (forked from THUDM/ChatGLM2-6B)
# P-Tuning v2 fine-tuning on multi-turn chat data.
# Set CHAT_TRAIN_DATA, CHAT_VAL_DATA and CHECKPOINT_NAME in the environment before running.
PRE_SEQ_LEN=128   # length of the trainable prefix (P-Tuning v2)
LR=1e-2           # learning rate; only the prefix encoder is trained, so a relatively large LR is used
NUM_GPUS=1

torchrun --standalone --nnodes=1 --nproc_per_node=$NUM_GPUS main.py \
    --do_train \
    --train_file $CHAT_TRAIN_DATA \
    --validation_file $CHAT_VAL_DATA \
    --preprocessing_num_workers 10 \
    --prompt_column prompt \
    --response_column response \
    --history_column history \
    --overwrite_cache \
    --model_name_or_path THUDM/chatglm2-6b \
    --output_dir $CHECKPOINT_NAME \
    --overwrite_output_dir \
    --max_source_length 256 \
    --max_target_length 256 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --predict_with_generate \
    --max_steps 3000 \
    --logging_steps 10 \
    --save_steps 1000 \
    --learning_rate $LR \
    --pre_seq_len $PRE_SEQ_LEN \
    --quantization_bit 4   # load the base model weights in INT4 to reduce GPU memory
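
The script reads its data and output paths from the environment variables CHAT_TRAIN_DATA, CHAT_VAL_DATA and CHECKPOINT_NAME. Below is a minimal launch sketch, assuming JSON-lines training files whose records carry the columns the script names above ("prompt", "response", "history"); the directory names, sample records, and the layout of "history" as a list of [query, answer] pairs are illustrative placeholders rather than part of this repo.

    # Hypothetical wrapper around train_chat.sh; paths and sample data are placeholders.
    set -euo pipefail

    DATA_DIR=data/chat          # assumed location for the example data
    mkdir -p "$DATA_DIR"

    # One multi-turn record per line; "history" is assumed to hold earlier [query, answer] pairs.
    cat > "$DATA_DIR/train.json" <<'EOF'
    {"prompt": "My fan is not spinning.", "response": "Is the fuse intact?", "history": []}
    {"prompt": "Yes, the fuse is fine.", "response": "Then check the relay signal wire.", "history": [["My fan is not spinning.", "Is the fuse intact?"]]}
    EOF
    cp "$DATA_DIR/train.json" "$DATA_DIR/dev.json"   # tiny validation split, only for the sketch

    # Pass the paths through the environment and launch the training script.
    CHAT_TRAIN_DATA="$DATA_DIR/train.json" \
    CHAT_VAL_DATA="$DATA_DIR/dev.json" \
    CHECKPOINT_NAME=output/chatglm2-6b-chat-pt-128-1e-2 \
    bash train_chat.sh

Checkpoints are written under the directory given by CHECKPOINT_NAME every --save_steps steps; only the prefix-encoder weights are trained when --pre_seq_len is set, so the saved checkpoints are small compared with the full model.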