Commit

Merge pull request ChatGPTNextWeb#6292 from Little-LittleProgrammer/feature/alibaba-omni-support

feat(alibaba): Added alibaba vision model and omni model support
Leizhenpeng authored Mar 1, 2025
2 parents 570cbb3 + a2c4e46 commit f7cde17
Showing 4 changed files with 61 additions and 16 deletions.
5 changes: 5 additions & 0 deletions app/client/api.ts
@@ -40,6 +40,11 @@ export interface MultimodalContent {
};
}

+export interface MultimodalContentForAlibaba {
+  text?: string;
+  image?: string;
+}

export interface RequestMessage {
role: MessageRole;
content: string | MultimodalContent[];
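
For context, the new interface mirrors DashScope's multimodal message parts, which carry bare text/image keys rather than the OpenAI-style type/image_url wrapper of MultimodalContent. A minimal sketch of the two part shapes, assuming the MultimodalContent fields defined earlier in this file (the data URLs are placeholders):

import {
  MultimodalContent,
  MultimodalContentForAlibaba,
} from "@/app/client/api";

// OpenAI-style part, as consumed by most providers:
const openaiPart: MultimodalContent = {
  type: "image_url",
  image_url: { url: "data:image/png;base64,AAAA" }, // placeholder
};

// DashScope-style part, matching the new interface:
const dashscopePart: MultimodalContentForAlibaba = {
  image: "data:image/png;base64,AAAA", // placeholder
};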
38 changes: 26 additions & 12 deletions app/client/platforms/alibaba.ts
@@ -7,20 +7,25 @@ import {
ChatMessageTool,
usePluginStore,
} from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import {
+  preProcessImageContentForAlibabaDashScope,
+  streamWithThink,
+} from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
MultimodalContent,
+  MultimodalContentForAlibaba,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
+  isVisionModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";

@@ -89,14 +94,6 @@ export class QwenApi implements LLMApi {
}

async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content:
-        v.role === "assistant"
-          ? getMessageTextContentWithoutThinking(v)
-          : getMessageTextContent(v),
-    }));

const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
@@ -105,6 +102,21 @@
},
};

+    const visionModel = isVisionModel(options.config.model);
+
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = (
+        visionModel
+          ? await preProcessImageContentForAlibabaDashScope(v.content)
+          : v.role === "assistant"
+            ? getMessageTextContentWithoutThinking(v)
+            : getMessageTextContent(v)
+      ) as any;
+
+      messages.push({ role: v.role, content });
+    }

const shouldStream = !!options.config.stream;
const requestPayload: RequestPayload = {
model: modelConfig.model,
@@ -129,7 +141,7 @@
"X-DashScope-SSE": shouldStream ? "enable" : "disable",
};

-    const chatPath = this.path(Alibaba.ChatPath);
+    const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
@@ -162,7 +174,7 @@
const json = JSON.parse(text);
const choices = json.output.choices as Array<{
message: {
-            content: string | null;
+            content: string | null | MultimodalContentForAlibaba[];
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
@@ -212,7 +224,9 @@
} else if (content && content.length > 0) {
return {
isThinking: false,
-              content: content,
+              content: Array.isArray(content)
+                ? content.map((item) => item.text).join(",")
+                : content,
};
}

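
One detail of the stream handler above: the multimodal endpoint returns message.content as an array of parts rather than a plain string, and the parser flattens it by joining the text fields. A minimal sketch of that normalization (the chunk content is hypothetical):

import { MultimodalContentForAlibaba } from "@/app/client/api";

// Hypothetical content field from a streamed DashScope chunk.
const content: string | null | MultimodalContentForAlibaba[] = [
  { text: "The image shows" },
  { text: " a cat." },
];

// The same Array.isArray normalization used in the handler above.
const text = Array.isArray(content)
  ? content.map((item) => item.text).join(",")
  : content;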
10 changes: 9 additions & 1 deletion app/constant.ts
@@ -221,7 +221,12 @@ export const ByteDance = {

export const Alibaba = {
ExampleEndpoint: ALIBABA_BASE_URL,
ChatPath: "v1/services/aigc/text-generation/generation",
ChatPath: (modelName: string) => {
if (modelName.includes("vl") || modelName.includes("omni")) {
return "v1/services/aigc/multimodal-generation/generation";
}
return `v1/services/aigc/text-generation/generation`;
},
};

export const Tencent = {
@@ -570,6 +575,9 @@ const alibabaModes = [
"qwen-max-0403",
"qwen-max-0107",
"qwen-max-longcontext",
"qwen-omni-turbo",
"qwen-vl-plus",
"qwen-vl-max",
];

const tencentModels = [
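
With ChatPath now a function of the model name, vision ("vl") and omni models are routed to DashScope's multimodal endpoint while everything else keeps the text-generation path. A quick sketch of the routing:

import { Alibaba } from "@/app/constant";

Alibaba.ChatPath("qwen-vl-max");     // "v1/services/aigc/multimodal-generation/generation"
Alibaba.ChatPath("qwen-omni-turbo"); // "v1/services/aigc/multimodal-generation/generation"
Alibaba.ChatPath("qwen-max");        // "v1/services/aigc/text-generation/generation"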
24 changes: 21 additions & 3 deletions app/utils/chat.ts
@@ -3,7 +3,7 @@ import {
UPLOAD_URL,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
-import { RequestMessage } from "@/app/client/api";
+import { MultimodalContent, RequestMessage } from "@/app/client/api";
import Locale from "@/app/locales";
import {
EventStreamContentType,
@@ -70,8 +70,9 @@ export function compressImage(file: Blob, maxSize: number): Promise<string> {
});
}

-export async function preProcessImageContent(
+export async function preProcessImageContentBase(
   content: RequestMessage["content"],
+  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
) {
if (typeof content === "string") {
return content;
@@ -81,7 +82,7 @@ export async function preProcessImageContent(
if (part?.type == "image_url" && part?.image_url?.url) {
try {
const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push({ type: part.type, image_url: { url } });
+        result.push(await transformImageUrl(url));
} catch (error) {
console.error("Error processing image URL:", error);
}
@@ -92,6 +93,23 @@
return result;
}

+export async function preProcessImageContent(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    type: "image_url",
+    image_url: { url },
+  })) as Promise<MultimodalContent[] | string>;
+}
+
+export async function preProcessImageContentForAlibabaDashScope(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    image: url,
+  }));
+}

const imageCaches: Record<string, string> = {};
export function cacheImageToBase64Image(imageUrl: string) {
if (imageUrl.includes(CACHE_URL_PREFIX)) {
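
After this refactor, both exported helpers delegate to preProcessImageContentBase and differ only in the part shape they emit for cached images. A hedged usage sketch (the input URL and the returned data URLs are placeholders):

import {
  preProcessImageContent,
  preProcessImageContentForAlibabaDashScope,
} from "@/app/utils/chat";
import { MultimodalContent } from "@/app/client/api";

async function demo() {
  // Hypothetical message content holding a single image part.
  const content: MultimodalContent[] = [
    { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
  ];

  // OpenAI-compatible providers keep the { type, image_url } wrapper:
  await preProcessImageContent(content);
  // -> [{ type: "image_url", image_url: { url: "data:image/png;base64,..." } }]

  // DashScope instead receives bare { image } parts:
  await preProcessImageContentForAlibabaDashScope(content);
  // -> [{ image: "data:image/png;base64,..." }]
}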
