# utils.py
from scipy.stats import kurtosis


def calc_ku(model, layer_kind=None):
    """ calculate the summed kurtosis of a model's square weight matrices """
    model_ku = 0
    for name, layer in model.state_dict().items():
        # only consider square 2-D weight matrices (e.g. attention projections)
        if len(layer.shape) != 2 or layer.shape[0] != layer.shape[1]:
            continue
        # optionally restrict to parameters whose name contains `layer_kind`
        if layer_kind is not None and layer_kind not in name:
            continue
        # move to CPU and convert to NumPy so scipy also handles GPU tensors
        ku = kurtosis(layer.flatten().cpu().numpy())
        model_ku += ku
    return model_ku


def _get_layer_kinds(vit: bool = False, llama: bool = False):
    """ get the parameter-name substrings that identify each layer kind """
    assert sum([vit, llama]) == 1, 'Exactly one of the flags should be set to True'
    if vit:
        return ['attention.query', 'attention.key', 'attention.value', 'output.dense']
    if llama:
        return ['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj',
                'mlp.gate_proj', 'mlp.up_proj', 'mlp.down_proj', 'input_layernorm',
                'post_attention_layernorm']

def _get_nodes(llama: bool = False, sd: bool = False):
    """ get (node id, Hugging Face model id) pairs for a model family """
    assert sum([llama, sd]) == 1, 'Exactly one of the flags should be set to True'
if llama:
return [
('0-X-X', 'meta-llama/Llama-2-7b-hf'),
('0-0-X', 'meta-llama/CodeLlama-7b-hf'),
('0-0-0', 'meta-llama/CodeLlama-7b-Instruct-hf'),
('0-1-X', 'meta-llama/CodeLlama-7b-Python-hf'),
('0-2-X', 'meta-llama/Llama-2-7b-chat-hf'),
]
if sd:
return [
('0-X-X', 'CompVis/stable-diffusion-v1-1'),
('0-0-X', 'CompVis/stable-diffusion-v1-2'),
('0-0-0', 'CompVis/stable-diffusion-v1-3'),
('0-0-1', 'CompVis/stable-diffusion-v1-4'),
('0-0-2', 'runwayml/stable-diffusion-v1-5'),
]
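

# --- usage sketch (illustrative, not part of the original utility functions) ---
# a minimal, hedged example of how these helpers could be combined: load one of
# the models listed in _get_nodes (assumes the Hugging Face `transformers`
# package, access to the gated Llama-2 weights, and enough memory), then report
# kurtosis overall and per Llama layer kind. Note that 1-D layernorm weights and
# non-square MLP projections are skipped by calc_ku's square-matrix filter.
if __name__ == '__main__':
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf')

    # kurtosis summed over every square 2-D weight matrix in the model
    print('total kurtosis:', calc_ku(model))

    # kurtosis restricted to each Llama layer kind
    for kind in _get_layer_kinds(llama=True):
        print(kind, calc_ku(model, layer_kind=kind))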