diff --git a/samples/python/chat_sample/chat_sample.py b/samples/python/chat_sample/chat_sample.py
index eee66fb71d..9477206145 100755
--- a/samples/python/chat_sample/chat_sample.py
+++ b/samples/python/chat_sample/chat_sample.py
@@ -15,21 +15,34 @@ def streamer(subword):
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('model_dir')
+    parser.add_argument("-m", "--model", type=str, required=True, help="Path to model")
+    parser.add_argument("-d", "--device", type=str, default="CPU", help="Device")
     args = parser.parse_args()
 
-    device = 'CPU'  # GPU can be used as well
-    pipe = openvino_genai.LLMPipeline(args.model_dir, device)
+    models_path = args.model
+    device = args.device
+
+    pipe = openvino_genai.LLMPipeline(models_path, device)
 
     config = openvino_genai.GenerationConfig()
     config.max_new_tokens = 100
 
+    # Predefined list of prompts
+    prompts = [
+        "Hello there! How are you doing?",
+        "What is OpenVINO?",
+        "Who are you?",
+        "Can you explain to me briefly what is Python programming language?",
+        "Explain the plot of Cinderella in a sentence.",
+        "What are some common mistakes to avoid when writing code?",
+        "Write a 100-word blog post on “Benefits of Artificial Intelligence and OpenVINO”",
+    ]
+
     pipe.start_chat()
-    while True:
-        try:
-            prompt = input('question:\n')
-        except EOFError:
-            break
+
+    for prompt in prompts:
+        print(f"question:\n{prompt}")
         pipe.generate(prompt, config, streamer)
         print('\n----------')
     pipe.finish_chat()