Update chat_sample.py to load prompts and select device
andrew-k-park committed on Nov 6, 2024
commit 4662f0d (parent: 7505fc0)
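
In short, the positional `model_dir` argument and the hard-coded CPU device are replaced by `-m/--model` and `-d/--device` options that feed straight into the pipeline constructor. A condensed sketch of the new setup, using only names that appear in the diff below:

```python
import argparse
import openvino_genai

parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", type=str, help="Path to model")
parser.add_argument("-d", "--device", type=str, default="CPU", help="Device")
args = parser.parse_args()

# The target device (e.g. CPU or GPU) now comes from the command line
# instead of being fixed to CPU in the source.
pipe = openvino_genai.LLMPipeline(args.model, args.device)
```

With these options the sample would be launched as, for example, `python chat_sample.py -m <path-to-exported-model> -d GPU`; the model path here is a placeholder, not a directory shipped with the repository.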
Showing 1 changed file with 21 additions and 8 deletions.

samples/python/chat_sample/chat_sample.py
```diff
@@ -15,21 +15,34 @@ def streamer(subword):
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('model_dir')
+    parser.add_argument("-m", "--model", type=str, help="Path to model")
+    parser.add_argument("-d", "--device", type=str, default="CPU", help="Device")
+
     args = parser.parse_args()
 
-    device = 'CPU' # GPU can be used as well
-    pipe = openvino_genai.LLMPipeline(args.model_dir, device)
+    models_path = args.model
+    device = args.device
+
+    pipe = openvino_genai.LLMPipeline(models_path, device)
 
     config = openvino_genai.GenerationConfig()
     config.max_new_tokens = 100
 
+    # Predefined list of prompts
+    prompts = [
+        "Hello there! How are you doing?",
+        "What is OpenVINO?",
+        "Who are you?",
+        "Can you explain to me briefly what is Python programming language?",
+        "Explain the plot of Cinderella in a sentence.",
+        "What are some common mistakes to avoid when writing code?",
+        "Write a 100-word blog post on “Benefits of Artificial Intelligence and OpenVINO“",
+    ]
+
     pipe.start_chat()
-    while True:
-        try:
-            prompt = input('question:\n')
-        except EOFError:
-            break
+
+    for prompt in prompts:
+        print(f"question:\n{prompt}")
         pipe.generate(prompt, config, streamer)
         print('\n----------')
     pipe.finish_chat()
```
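
For context, the `streamer` callback named in the hunk header (`def streamer(subword)`) sits just above the changed lines and is passed to `pipe.generate()` unchanged by this commit. A minimal sketch of what that callback looks like in this sample, assuming it simply echoes each generated subword and lets generation continue:

```python
def streamer(subword):
    # Print each subword as it arrives so the answer streams to the console.
    print(subword, end='', flush=True)
    # Returning False tells the pipeline to continue generation;
    # returning True would stop it early.
    return False
```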
