Commit c78d1f7

Raise an error if the token count exceeds 1024 instead of attempting to re-chunk
Signed-off-by: Khaled Sulayman <[email protected]>
Parent: d96f286

File tree

1 file changed: +12 -15 lines changed

notebooks/instructlab-knowledge/utils/create_seed_dataset.py

Lines changed: 12 additions & 15 deletions
@@ -159,24 +159,21 @@ def add_icls(qna_yaml: Dict[str, str], chunked_document: Dataset) -> Dataset:
         )
     )
     chunked_document_all_icl = safe_concatenate_datasets(chunked_document_all_icl)
-    chunked_document_all_icl = chunked_document_all_icl.map(
-        lambda x: {
-            "chunks": chunk_document(
-                [x["document"]], server_ctx_size=4096, chunk_word_count=1024
-            )
-            if get_token_count(x["document"], tokenizer) > 1024
-            else [x["document"]]
-        }
-    )
+    for c in chunked_document_all_icl:
+        if get_token_count(c["document"], tokenizer) > 1024:
+            raise ValueError("Chunk exceeds token count of 1024")
+
+    # chunked_document_all_icl = [{"chunks": [c["document"]]} for c in chunked_document_all_icl]
+
     df = chunked_document_all_icl.to_pandas()
-    df_exploded = df.explode("chunks").reset_index(drop=True)
-    new_ds = Dataset.from_pandas(df_exploded)
-    new_ds = new_ds.remove_columns("document").rename_columns(
-        {"chunks": "document"}
-    )
+    # df_exploded = df.explode("chunks").reset_index(drop=True)
+    new_ds = Dataset.from_pandas(df)
+    # new_ds = new_ds.remove_columns("document").rename_columns(
+    #     {"chunks": "document"}
+    # )
 
     # Only keep document greater than 100 tokens
     new_ds = new_ds.filter(
-        lambda x: get_token_count(x["document"], tokenizer) > 100
+        lambda c: get_token_count(c["document"], tokenizer) > 100
     )
     return new_ds
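
For readers skimming the change, here is a minimal, self-contained sketch of the fail-fast check this commit introduces. It is an illustration, not the project's code: the Hugging Face `datasets` and `transformers` imports, the `gpt2` tokenizer, and the `get_token_count` stand-in are all assumptions, since the real helper's implementation is outside this diff.

# A sketch of the new fail-fast behavior, NOT the project's code.
# Assumptions: Hugging Face `datasets`/`transformers` are installed, and
# get_token_count simply counts tokenizer-encoded tokens.
from datasets import Dataset
from transformers import AutoTokenizer


def get_token_count(text: str, tokenizer) -> int:
    # Hypothetical stand-in for the helper used in create_seed_dataset.py.
    return len(tokenizer.encode(text))


tokenizer = AutoTokenizer.from_pretrained("gpt2")
ds = Dataset.from_dict({"document": ["a short chunk", "word " * 2000]})

# As of this commit, oversized documents are no longer re-chunked via
# chunk_document(); the loop raises instead, surfacing the problem early.
for c in ds:
    if get_token_count(c["document"], tokenizer) > 1024:
        raise ValueError("Chunk exceeds token count of 1024")

The design trade-off visible in the diff: the old `.map` silently re-chunked oversized documents and then exploded the "chunks" column, changing the row count; raising instead keeps rows one-to-one with the input documents, which is why the `df.explode` and `rename_columns` steps are commented out rather than kept.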
