This repository was archived by the owner on Oct 25, 2024. It is now read-only.

Commit e68000f

added missing rope_scale param

Signed-off-by: Luke Nezda <[email protected]>

1 parent 0131371 commit e68000f

1 file changed (+3, -2 lines)

intel_extension_for_transformers/llm/runtime/graph/scripts/convert_mistral.py (+3, -2)

@@ -192,6 +192,7 @@ def loadHFTransformerJson(model: 'LazyModel', config_path: Path) -> 'Params':
             ffn_hidden_size=ffn_hidden_size,
             rms_norm_eps=rms_norm_eps,
             rope_theta=rope_theta,
+            rope_scale=rope_scale,
         )
 
     # LLaMA v2 70B params.json
@@ -1064,8 +1065,8 @@ def write_file_header(self, params: Params, file_type: NEFileType) -> None:
 
         self.fout.write(
             struct.pack("i", 1)
-        )
-        # TODO, bos_token_id = 0 in https://huggingface.co/decapoda-research/llama-7b-hf/blob/main/config.json
+        )
+        # TODO, bos_token_id = 0 in https://huggingface.co/decapoda-research/llama-7b-hf/blob/main/config.json
         # but bos_token_id = 1 in llama.cpp
         self.fout.write(struct.pack("i", 2))
 

0 comments
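For readers following the change: the new rope_scale value has to be read from the Hugging Face config before it can be passed into Params as this commit does. Below is a minimal, hypothetical sketch of how such a value is commonly derived; the config keys ("rope_scaling", "factor"), the Params dataclass, and load_rope_params are illustrative assumptions, not code taken from convert_mistral.py.

# Hypothetical sketch: the config keys and this Params layout are assumptions
# modeled on common HF-to-GGML-style converters, not the actual convert_mistral.py code.
import json
from dataclasses import dataclass
from pathlib import Path


@dataclass
class Params:
    rope_theta: float = 10000.0
    rope_scale: float = 1.0  # linear RoPE scaling factor; 1.0 means no scaling


def load_rope_params(config_path: Path) -> Params:
    config = json.loads(config_path.read_text())
    # HF configs typically expose linear scaling as config["rope_scaling"]["factor"];
    # fall back to 1.0 when the checkpoint does not use RoPE scaling.
    rope_scaling = config.get("rope_scaling") or {}
    return Params(
        rope_theta=float(config.get("rope_theta", 10000.0)),
        rope_scale=float(rope_scaling.get("factor", 1.0)),
    )

Once the value is on Params, write_file_header can presumably serialize it next to the other RoPE fields with struct.pack; the exact header layout is defined by the runtime and is not shown in this hunk.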