llama2_70b.toml
# torchtitan Config.toml
# NOTE: this toml config is a preset for 64 A100 GPUs.

[job]
dump_folder = "./outputs"
description = "Llama2 70B training"

[profiling]
enable_profiling = true
save_traces_folder = "profile_trace"
profile_freq = 100
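# Reading of the keys above (an inference from this section, not documented behavior):
# a profiler trace should be captured every profile_freq = 100 steps and written
# beneath dump_folder/save_traces_folder, i.e. ./outputs/profile_trace in this preset.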

[metrics]
log_freq = 1
enable_tensorboard = false
save_tb_folder = "tb"

[model]
name = "llama2"
flavor = "70B"
norm_type = "fused_rmsnorm" # layernorm / np_layernorm / rmsnorm / fused_rmsnorm
tokenizer_path = "/mnt/petrelfs/share_data/yanhang/tokenizes/v13.model"
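# "fused_rmsnorm" is taken here to mean torchtitan's fused RMSNorm kernel (a Triton
# implementation at the time of writing); "rmsnorm" is the unfused fallback listed
# in the options above if the fused kernel is not available in your build.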

[optimizer]
name = "AdamW"
lr = 1e-4

[training]
batch_size = 2
seq_len = 4096
warmup_steps = 10 # lr scheduler warm up, 0.01 of the train steps
max_norm = 1.0 # grad norm clipping
steps = 1000
data_parallel_replicate_degree = 1
data_parallel_shard_degree = -1
tensor_parallel_degree = 1
compile = false
dataset = "tinystories"
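# Rough token budget per optimizer step, assuming batch_size is the per-rank (local)
# batch size and all 64 GPUs of the preset land in the data-parallel dimension:
#   tokens/step = batch_size * seq_len * dp_degree = 2 * 4096 * 64 = 524,288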

[experimental]
context_parallel_degree = 1
pipeline_parallel_degree = 1
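# Expected resolution of the parallel degrees on the 64-GPU preset, assuming
# data_parallel_shard_degree = -1 means "absorb whatever is left of the world size":
#   dp_replicate * dp_shard * tp * cp * pp = world_size
#   1            * 64       * 1  * 1  * 1  = 64
# i.e. pure FSDP sharding across all 64 ranks, with no tensor, context, or pipeline parallelism.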

[checkpoint]
enable_checkpoint = false
folder = "checkpoint"
interval_type = "steps"
interval = 500
model_weights_only = false
export_dtype = "float32"
async_mode = "disabled" # ["disabled", "async", "async_with_pinned_mem"]
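# Checkpointing is disabled in this preset. A minimal sketch for turning it on,
# reusing only the keys of this section (values are illustrative, not prescribed):
#   enable_checkpoint = true
#   interval = 500               # with interval_type = "steps", save every 500 steps
#   model_weights_only = false   # keep optimizer state so training can resume exactly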

[activation_checkpoint]
mode = 'full' # ['none', 'selective', 'full']
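# 'full' recomputes activations for every transformer block in the backward pass,
# trading extra compute for the memory headroom a 70B model needs; 'selective'
# would checkpoint only a subset of ops/layers, and 'none' disables recomputation.
# (General activation-checkpointing trade-off, not measured numbers for this run.)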

[float8]
enable_float8_linear = false
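# Float8 linear layers stay off here. Assuming torchtitan's float8 path builds on
# torchao, it also needs FP8-capable hardware (Hopper-class, e.g. H100); the A100s
# this preset targets have no FP8 tensor cores.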