forked from nickaggarwal/Llama-2-7b-hf

app.py
from threading import Thread
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = 'meta-llama/Llama-2-7b-chat-hf'


class InferlessPythonModel:
    def get_prompt(self, message, chat_history, system_prompt):
        # Build a Llama-2 chat prompt: the system prompt goes inside <<SYS>>
        # tags, and each user/assistant turn is wrapped in [INST] ... [/INST].
        texts = [f'[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n']
        for user_input, response in chat_history:
            texts.append(f'{user_input.strip()} [/INST] {response.strip()} </s><s> [INST] ')
        texts.append(f'{message.strip()} [/INST]')
        return ''.join(texts)
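
    # For example, with system_prompt "You are helpful", one prior exchange
    # ("Hi", "Hello!"), and message "How are you?", get_prompt returns
    # (illustrative values, following the template built above):
    #   [INST] <<SYS>>
    #   You are helpful
    #   <</SYS>>
    #
    #   Hi [/INST] Hello! </s><s> [INST] How are you? [/INST]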

    def get_input_token_length(self, message, chat_history, system_prompt):
        # Tokenize the full prompt and count tokens without touching the GPU.
        prompt = self.get_prompt(message, chat_history, system_prompt)
        input_ids = self.tokenizer([prompt], return_tensors='np')['input_ids']
        return input_ids.shape[-1]
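
    # This helper is not called elsewhere in this file; a caller could use it
    # to reject over-long prompts before generation, e.g. (4096 is Llama-2's
    # context length):
    #   if self.get_input_token_length(message, chat_history, system_prompt) > 4096:
    #       raise ValueError('Prompt is too long for the model context window')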

    def run_function(self, message,
                     chat_history,
                     system_prompt,
                     max_new_tokens=1024,
                     temperature=0.8,
                     top_p=0.95,
                     top_k=5):
        prompt = self.get_prompt(message, chat_history, system_prompt)
        # The model is loaded with device_map='auto' on a CUDA machine, so the
        # input tensors are moved to the GPU as well.
        inputs = self.tokenizer([prompt], return_tensors='pt').to('cuda')
        streamer = TextIteratorStreamer(self.tokenizer,
                                        timeout=10.0,
                                        skip_prompt=True,
                                        skip_special_tokens=True)
        generate_kwargs = dict(
            inputs,
            streamer=streamer,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            top_p=top_p,
            top_k=top_k,
            temperature=temperature,
            num_beams=1,
        )
        # generate() blocks until it finishes, so run it on a background
        # thread and drain the streamer here until generation completes.
        t = Thread(target=self.model.generate, kwargs=generate_kwargs)
        t.start()
        outputs = ''
        for text in streamer:
            outputs += text
        return outputs

    def initialize(self):
        # Read the Hugging Face access token from the environment rather than
        # hard-coding it in source control.
        auth_token = os.environ.get('HF_TOKEN')
        self.tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=auth_token)
        if torch.cuda.is_available():
            self.model = AutoModelForCausalLM.from_pretrained(
                model_id,
                torch_dtype=torch.float16,
                device_map='auto',
                use_auth_token=auth_token,
            )
        else:
            # run_function assumes a CUDA device; without one the model is
            # never loaded and infer() will fail.
            self.model = None

    def infer(self, inputs):
        message = inputs['message']
        chat_history = inputs.get('chat_history', [])
        system_prompt = inputs.get('system_prompt', '')
        result = self.run_function(
            message=message,
            chat_history=chat_history,
            system_prompt=system_prompt,
        )
        return {"generated_text": result}

    def finalize(self):
        # Drop references so the model and tokenizer can be garbage-collected.
        self.tokenizer = None
        self.model = None
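

# A minimal usage sketch, not part of the Inferless runtime (which calls
# initialize/infer/finalize itself). The input keys match what infer() reads;
# HF_TOKEN is assumed to be set in the environment and a CUDA GPU available.
if __name__ == '__main__':
    model = InferlessPythonModel()
    model.initialize()
    output = model.infer({
        'message': 'What is the capital of France?',
        'system_prompt': 'You are a helpful assistant.',
    })
    print(output['generated_text'])
    model.finalize()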