llm.js
import OpenAI from "openai";
import debug from "debug";

// Namespaced debug loggers; the debug factory is called directly, not with `new`
const llmDebug = debug('llm');
const embeddingDebug = debug('embedding');

// The OpenAI client reads the OPENAI_API_KEY environment variable by default
const openai = new OpenAI();
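// Debug output is off by default; enable it with the debug package's DEBUG
// environment variable, e.g. `DEBUG=llm,embedding node app.js`
// (the entry-point name here is illustrative, not from this repo).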
// Generate a vector embedding from the submitted prompt
export async function generateEmbedding(prompt) {
  const embeddingsModel = 'text-embedding-3-small';
  llmDebug(`Model: ${embeddingsModel}`);
  llmDebug(`Prompt: ${prompt}`);

  // This is the call to the OpenAI model that generates the embedding
  const response = await openai.embeddings.create({
    model: embeddingsModel,
    input: prompt,
  });

  // Extract the embedding vector from the JSON response
  const embedding = response.data[0].embedding;

  embeddingDebug('Embedding:');
  embeddingDebug(embedding);
  return embedding;
}
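// Example usage (a sketch, not part of the original module; the question
// string is hypothetical):
//
//   const vector = await generateEmbedding("How many vacation days do I get?");
//   // text-embedding-3-small returns a 1536-dimensional float array,
//   // suitable for cosine-similarity search against stored document vectors.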
// Use the gpt-4o-mini model to generate an LLM response
// based on the prompt and any additional context obtained via RAG
export async function generateChatbotResponse(prompt, context) {
  const completionsModel = 'gpt-4o-mini';

  // The developer prompt to the chatbot tells it how to behave,
  // provides information about the data it's receiving,
  // and supplies the RAG context (if any)
  const developerPrompt = `
    You are a very enthusiastic HR representative who loves
    to help people! Given the following sections from the company
    handbook and internal documents, answer the question using
    that information as the primary source.

    You can supplement the information in the context sections
    with general information that you know, but be sure to distinguish
    internal information from external information in your response.

    If you are unsure and the answer is not explicitly written
    in the information you have, say
    "Sorry, I don't know how to help with that."

    Context sections:
    ${context}

    Answer in conversational prose.
  `;

  llmDebug(`Model: ${completionsModel}`);
  llmDebug(`Prompt: ${prompt}`);

  // This is the call to the OpenAI model that generates the chatbot response
  const response = await openai.chat.completions.create({
    model: completionsModel,
    messages: [
      { role: "developer", content: developerPrompt },
      // The user prompt is the question that the user asked
      { role: "user", content: prompt },
    ],
    store: true, // store the completion on OpenAI's side for later review
  });

  llmDebug("Response:");
  llmDebug(response);
  return `${response.choices[0].message.content}\n`;
}
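// Example usage (a sketch, not part of the original module; the context
// string would normally come from a RAG retrieval step, e.g. the handbook
// sections whose stored embeddings are nearest to the question's embedding):
//
//   const context = "Vacation policy: employees accrue 1.5 days per month...";
//   const answer = await generateChatbotResponse(
//     "How many vacation days do I get per year?",
//     context,
//   );
//   console.log(answer);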