data_converter.py (forked from avnermay/Sequoia)

import torch
from accelerate.logging import get_logger
from datasets import load_dataset
from transformers.utils import check_min_version
from transformers.utils.versions import require_version

check_min_version('4.28.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/translation/requirements.txt')

logger = get_logger(__name__)


def convert_wiki_dataset(tokenizer, seq_len=256):
    """Tokenize the first 2000 English Wikipedia articles, truncating each to seq_len tokens."""
    dataset = load_dataset('wikimedia/wikipedia', '20231101.en', split='train[0:2000]')

    def tokenize_function(examples):
        return tokenizer(examples['text'], return_tensors='pt', max_length=seq_len, padding=True, truncation=True)

    dataset = dataset.map(tokenize_function, batched=True, remove_columns=['text'])
    dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
    return dataset


def convert_cnn_dataset(tokenizer, seq_len=256):
    """Tokenize the first 2000 CNN/DailyMail test articles, truncating each to seq_len tokens."""
    dataset = load_dataset('cnn_dailymail', '1.0.0', split='test[0:2000]')

    def tokenize_function(examples):
        return tokenizer(examples['article'], return_tensors='pt', max_length=seq_len, padding=True, truncation=True)

    dataset = dataset.map(tokenize_function, batched=True, remove_columns=['article'])
    dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
    return dataset


def convert_c4_dataset_eval(tokenizer, seq_len=256):
    """Tokenize the first 2000 C4 (en) validation documents, truncating each to seq_len tokens."""
    dataset = load_dataset('c4', 'en', split='validation[0:2000]')

    def tokenize_function(examples):
        return tokenizer(examples['text'], return_tensors='pt', max_length=seq_len, padding=True, truncation=True)

    dataset = dataset.map(tokenize_function, batched=True, remove_columns=['text', 'timestamp', 'url'])
    dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
    return dataset


def convert_jsonl_file(tokenizer, data_files, seq_len=256, text_field='text', remove_columns=None):
    """Tokenize a JSONL file, reading raw text from `text_field` and truncating each example to seq_len tokens."""
    if remove_columns is None:
        remove_columns = [text_field]
    dataset = load_dataset('json', data_files=data_files, split='train', streaming=False)

    def tokenize_function(examples):
        # Read from the configurable text_field rather than a hard-coded 'text' key.
        return tokenizer(examples[text_field], return_tensors='pt', max_length=seq_len, padding=True, truncation=True)

    dataset = dataset.map(tokenize_function, batched=True, remove_columns=remove_columns)
    dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'])
    return dataset


def convert_dataset(tokenizer, file_path):
    """Load pre-tokenized examples from a JSON/JSONL file and build (input_ids, labels) pairs,
    masking pad positions in the labels with -100 so they are ignored by the loss."""
    dataset = load_dataset('json', data_files=file_path, split='train')

    def tokenize_function(examples):
        # Use an integer tensor: input_ids must be long-typed for embedding lookups.
        input_ids = torch.LongTensor(examples['input_ids'])
        labels = input_ids.clone()
        if tokenizer.pad_token_id is not None:
            labels[labels == tokenizer.pad_token_id] = -100
        return {
            'input_ids': input_ids,
            'labels': labels
        }

    # The source file is expected to carry an 'input_tokens' column alongside 'input_ids'; it is dropped here.
    dataset = dataset.map(tokenize_function, batched=True, remove_columns=['input_tokens'])
    dataset.set_format(type='torch', columns=['input_ids', 'labels'])
    return dataset
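

# Usage sketch: one way these converters might be driven. The 'gpt2' checkpoint
# and the batch size below are illustrative assumptions, not values taken from this file.
if __name__ == '__main__':
    from transformers import AutoTokenizer
    from torch.utils.data import DataLoader

    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    tokenizer.pad_token = tokenizer.eos_token  # GPT-2 ships without a pad token
    dataset = convert_cnn_dataset(tokenizer, seq_len=256)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
    batch = next(iter(dataloader))
    print(batch['input_ids'].shape, batch['attention_mask'].shape)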