Skip to content

Commit

Permalink
Examples now take the model path as a CLI argument
Browse files Browse the repository at this point in the history
  • Loading branch information
nnance committed Apr 21, 2024
1 parent 0126705 commit 8fdf249
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 28 deletions.
4 changes: 2 additions & 2 deletions guides/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,10 @@ This is a complete list of guides and examples using Vercel AI and the llamacpp-

## Examples

You can run any of the following examples with project root folder with:
You can run any of the following examples from the project root folder, where {modelPath} is the full path to the LLM to use:

```sh
npx tsx ./src/examples/{example}
npx tsx ./src/examples/{example} {modelPath}
```

- [generate-text.ts](../src/examples/generate-text.ts): Text generation
Expand Down
13 changes: 5 additions & 8 deletions src/examples/generate-text.ts
Original file line number Diff line number Diff line change
@@ -1,15 +1,12 @@
import { experimental_generateText } from "ai";
import { LLamaCpp } from "../index.js";
import { fileURLToPath } from "url";
import path from "path";

const modelPath = path.join(
path.dirname(fileURLToPath(import.meta.url)),
"../../models",
"llama-2-7b-chat.Q4_K_M.gguf"
);
const model = process.argv[2];
if (!model) {
throw new Error("Missing model path argument");
}

const llamacpp = new LLamaCpp(modelPath);
const llamacpp = new LLamaCpp(model);

experimental_generateText({
model: llamacpp.completion(),
Expand Down
13 changes: 5 additions & 8 deletions src/examples/stream-text.ts
Original file line number Diff line number Diff line change
@@ -1,16 +1,13 @@
import { experimental_streamText } from "ai";
import { LLamaCpp } from "../index.js";
import { fileURLToPath } from "url";
import path from "path";
import { stdout } from "process";

const modelPath = path.join(
path.dirname(fileURLToPath(import.meta.url)),
"../../models",
"llama-2-7b-chat.Q4_K_M.gguf"
);
const model = process.argv[2];
if (!model) {
throw new Error("Missing model path argument");
}

const llamacpp = new LLamaCpp(modelPath);
const llamacpp = new LLamaCpp(model);

experimental_streamText({
model: llamacpp.completion(),
Expand Down
16 changes: 6 additions & 10 deletions src/examples/streaming-chatbot.ts
Original file line number Diff line number Diff line change
@@ -1,16 +1,12 @@
import * as readline from "node:readline/promises";
import { fileURLToPath } from "url";
import path from "path";

import { ExperimentalMessage, experimental_streamText } from "ai";
import { LLamaCpp } from "../index.js";

const __dirname = path.dirname(fileURLToPath(import.meta.url));
const modelPath = path.join(
__dirname,
"../../models",
"llama-2-7b-chat.Q4_K_M.gguf"
);
const model = process.argv[2];
if (!model) {
throw new Error("Missing model path argument");
}

const terminal = readline.createInterface({
input: process.stdin,
Expand All @@ -19,7 +15,7 @@ const terminal = readline.createInterface({

const messages: ExperimentalMessage[] = [];

async function main() {
async function main(modelPath: string) {
const llamacpp = new LLamaCpp(modelPath);
const model = llamacpp.chat();

Expand Down Expand Up @@ -47,4 +43,4 @@ async function main() {
}
}

main().catch(console.error);
main(model).catch(console.error);

0 comments on commit 8fdf249

Please sign in to comment.