diff --git a/.env.example b/.env.example
index 7d7ca1e..deb8da8 100644
--- a/.env.example
+++ b/.env.example
@@ -1,3 +1,4 @@
CRX_PUBLIC_KEY="longCRXPublicKeyToEnsureTheExtensionIdStaysTheSameandOauth2.0WorksNicely"
OAUTH_CLIENT_ID=xxx.apps.googleusercontent.com
-PUBLIC_EXTENSION_OS_API_ENDPOINT=https://localhost:3000/v1/chat/completions
\ No newline at end of file
+PUBLIC_EXTENSION_OS_API_ENDPOINT=https://localhost:3000/v1/chat/completions
+E2E_TEST_GROQ_KEY="yourapikey:thisisusedforE2ETestingOnly"
diff --git a/README.md b/README.md
index 74c990b..75e973a 100644
--- a/README.md
+++ b/README.md
@@ -183,6 +183,10 @@ Move it somewhere else ASAP:
# Changelog
+### 0.0.24
+
+- Adding the ability to specify a custom URL
+
### 0.0.23
- Adding the uninstall hook to understand what can we improve.
diff --git a/contents/core.tsx b/contents/core.tsx
index 2705f28..4514b34 100644
--- a/contents/core.tsx
+++ b/contents/core.tsx
@@ -123,7 +123,7 @@ const PlasmoOverlay = () => {
wordBreak: "break-word",
display: successDivVisibe ? "block" : "none"
}}>
-
+
{/* Loading box */}
diff --git a/lib/openAITypeCall.ts b/lib/openAITypeCall.ts
index 96da53d..d0af544 100644
--- a/lib/openAITypeCall.ts
+++ b/lib/openAITypeCall.ts
@@ -3,19 +3,25 @@
// ------------------------------------------------------------------------------------
import { Storage } from "@plasmohq/storage";
-import { useUserInfo } from "./providers/UserInfoContext";
import { getOrCreateClientUUID } from "./clientUUID";
import { insertStatisticsRow } from "./anonymousTracking";
// Function to map vendor names to their respective API endpoints
-function vendorToEndpoint(vendor: string): string {
+async function vendorToEndpoint(vendor: string): Promise<string> {
+ const storage = new Storage();
+
+ if (vendor === "localhost") {
+ const customUrl = await storage.get("llmCustomEndpoint");
+    return customUrl || "http://localhost:11434/v1/chat/completions";
+ }
+
const endpoints: { [key: string]: string } = {
"extension | OS": process.env.PLASMO_PUBLIC_EXTENSION_OS_API_ENDPOINT,
openai: "https://api.openai.com/v1/chat/completions",
groq: "https://api.groq.com/openai/v1/chat/completions",
together: "https://api.together.xyz/v1/chat/completions",
- localhost: "http://localhost:11434/v1/chat/completions",
};
+
return endpoints[vendor] || endpoints["groq"];
}
@@ -72,7 +78,7 @@ export async function callOpenAIReturn(
const openAIModel = overrideModel || storedModel;
const vendor = overrideProvider || storedVendor;
const apiKey = llmKeys ? llmKeys[vendor] : "";
- const openAIEndpoint = vendorToEndpoint(vendor);
+ const openAIEndpoint = await vendorToEndpoint(vendor);
const headers = new Headers({
"Content-Type": "application/json",
diff --git a/options/LlmSettings.tsx b/options/LlmSettings.tsx
index 64f591a..a4235e8 100644
--- a/options/LlmSettings.tsx
+++ b/options/LlmSettings.tsx
@@ -174,6 +174,7 @@ export default function LlmSettings({ debugInfo }: { debugInfo: string }) {
const [llmModel, setLlmModel] = useStorage("llmModel", "llama-3.1-70b-versatile")
const [llmProvider, setLlmProvider] = useStorage("llmProvider", "extension | OS")
const [llmKeys, setLlmKeys] = useStorage("llmKeys", {})
+ const [llmCustomEndpoint, setLlmCustomEndpoint] = useStorage("llmCustomEndpoint", (v) => v === undefined ? "http://localhost:11434/v1/chat/completions" : v)
const hasRun = useRef(false); // Add this line
@@ -226,78 +227,96 @@ export default function LlmSettings({ debugInfo }: { debugInfo: string }) {
)}
-
-
-
-
-
+
+
+
+
+
+
- {llmProvider === "extension | OS" && }
+ {llmProvider === "extension | OS" && }
- {!llmProvider && (
- <>
-
- Instructions: Choose a provider from the list on your left. The selected provider will be set as the default.
- >
- )}
+ {!llmProvider && (
+ <>
+
+ Instructions: Choose a provider from the list on your left. The selected provider will be set as the default.
+ >
+ )}
+