# 7B_lora.yaml
# This config uses hyperparameters based on a small set of experiments and
# information available on various forums. It is not meant to replicate the
# numbers from the paper.
#
# Run this config on 4 GPUs using the following:
#   tune run --nproc_per_node 4 lora_finetune_distributed --config mistral/7B_lora
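#
# Individual config values can also be overridden from the command line using
# torchtune's key=value syntax; for example (the batch size is illustrative):
#   tune run --nproc_per_node 4 lora_finetune_distributed --config mistral/7B_lora batch_size=2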

# Tokenizer
tokenizer:
  _component_: torchtune.models.mistral.mistral_tokenizer
  path: /tmp/Mistral-7B-v0.1/tokenizer.model
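# The paths above assume the model has already been downloaded to
# /tmp/Mistral-7B-v0.1, e.g. with torchtune's download command (a Hugging Face
# token may be needed for gated repos):
#   tune download mistralai/Mistral-7B-v0.1 --output-dir /tmp/Mistral-7B-v0.1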

# Dataset
dataset:
  _component_: torchtune.datasets.alpaca_dataset
  train_on_input: True
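  # With train_on_input: True the loss is computed on the prompt tokens as
  # well as the response; set it to False to mask the prompt out of the loss.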
seed: null
shuffle: True

# Model Arguments
model:
  _component_: torchtune.models.mistral.lora_mistral_7b
  lora_attn_modules: ['q_proj', 'k_proj', 'v_proj']
  apply_lora_to_mlp: True
  apply_lora_to_output: True
  lora_rank: 64
  lora_alpha: 16
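  # LoRA updates are scaled by lora_alpha / lora_rank (here 16 / 64 = 0.25).
  # A higher lora_rank gives the adapters more capacity at the cost of extra
  # memory; 'output_proj' can also be added to lora_attn_modules to adapt the
  # attention output projection.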

checkpointer:
  _component_: torchtune.utils.FullModelHFCheckpointer
  checkpoint_dir: /tmp/Mistral-7B-v0.1
  checkpoint_files: [
    pytorch_model-00001-of-00002.bin,
    pytorch_model-00002-of-00002.bin
  ]
  recipe_checkpoint: null
  output_dir: /tmp/Mistral-7B-v0.1
  model_type: MISTRAL
resume_from_checkpoint: False
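# To resume an interrupted run, set resume_from_checkpoint: True and point
# recipe_checkpoint at the recipe state file the checkpointer writes to
# output_dir (recipe_state.pt, if using torchtune's default naming).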

# Optimizer and Scheduler
optimizer:
  _component_: torch.optim.AdamW
  lr: 2e-5

lr_scheduler:
  _component_: torchtune.modules.get_cosine_schedule_with_warmup
  num_warmup_steps: 100

loss:
  _component_: torch.nn.CrossEntropyLoss
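# get_cosine_schedule_with_warmup ramps the learning rate linearly from 0 to
# lr over the first num_warmup_steps optimizer steps, then decays it along a
# cosine curve for the remainder of training.
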
# Fine-tuning arguments
batch_size: 4
epochs: 3
max_steps_per_epoch: null
gradient_accumulation_steps: 1
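# Effective global batch size is batch_size x gradient_accumulation_steps x
# number of GPUs = 4 x 1 x 4 = 16; raising gradient_accumulation_steps keeps
# the effective batch size up if per-device batch_size must shrink to fit in
# memory.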
# Training env
device: cuda
# Memory management
enable_activation_checkpointing: True
# Reduced precision
dtype: bf16
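# bf16 requires hardware support (e.g. NVIDIA Ampere or newer); on older GPUs,
# set dtype: fp32 instead.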

# Logging
metric_logger:
  _component_: torchtune.utils.metric_logging.DiskLogger
  log_dir: ${output_dir}
output_dir: /tmp/Mistral-7B-v0.1
log_every_n_steps: null
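# To stream metrics to Weights & Biases instead of local disk, swap in
# torchtune's WandBLogger (assumes the wandb package is installed; the project
# name below is a placeholder):
# metric_logger:
#   _component_: torchtune.utils.metric_logging.WandBLogger
#   project: mistral-7b-lora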