Skip to content

Commit

Permalink
Add new endpoint for HackerGPT v2 plugin integration (#224)
Browse files Browse the repository at this point in the history
  • Loading branch information
RostyslavManko authored Feb 27, 2024
1 parent 38feffd commit 6642984
Show file tree
Hide file tree
Showing 9 changed files with 325 additions and 24 deletions.
285 changes: 285 additions & 0 deletions pages/api/chat-2v.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,285 @@
import { OpenAIError, OpenAIStream } from '@/pages/api/openaistream';
import { ChatBody, Message } from '@/types/chat';
import { ToolID } from '@/types/tool';

// @ts-expect-error
import wasm from '../../node_modules/@dqbd/tiktoken/lite/tiktoken_bg.wasm?module';

import tiktokenModel from '@dqbd/tiktoken/encoders/cl100k_base.json';
import { Tiktoken, init } from '@dqbd/tiktoken/lite/init';

import {
fetchGoogleSearchResults,
processGoogleResults,
createAnswerPromptGoogle,
} from '@/pages/api/chat/plugins/googlesearch';

import {
toolUrls,
toolIdToHandlerMapping,
isCommand,
handleCommand,
isToolsCommand,
displayToolsHelpGuide,
} from '@/pages/api/chat/plugins/tools';

// Next.js route segment config: run this API route on the Edge runtime
// (needed for streaming responses and the WASM tiktoken build used below).
export const config = {
  runtime: 'edge',
};

// CORS headers attached to every response from this endpoint: only the
// production HackerGPT frontend origin may POST here.
export const corsHeaders = {
  'Access-Control-Allow-Origin': 'https://www.hackergpt.chat',
  'Access-Control-Allow-Methods': 'POST',
  'Access-Control-Allow-Headers': 'Content-Type, Authorization',
};

// Model identifiers accepted by this endpoint.
enum ModelType {
  GPT35TurboInstruct = 'gpt-3.5-turbo-instruct',
  GPT4 = 'gpt-4',
}

/**
 * Returns the context-token budget for a supported model, or `null` when the
 * model string is not recognized (the handler turns that into a 400).
 */
const getTokenLimit = (model: string) => {
  const limits: Record<string, number> = {
    [ModelType.GPT35TurboInstruct]: 6000,
    [ModelType.GPT4]: 12000,
  };
  return limits[model] ?? null;
};

/**
 * Edge API handler for the HackerGPT v2 plugin integration.
 *
 * Flow: authenticate the caller with a shared secret -> trim the
 * conversation to the model's token budget (counted with tiktoken) ->
 * optionally augment the prompt via the web-search plugin or dispatch to a
 * `/command` tool handler -> return the response.
 */
const handler = async (req: Request): Promise<Response> => {
  try {
    const useWebBrowsingPlugin = process.env.USE_WEB_BROWSING_PLUGIN === 'TRUE';

    const authToken = req.headers.get('Authorization');

    // Shared-secret authentication: only the HackerGPT v2 plugin backend may
    // call this endpoint.
    if (authToken !== process.env.SECRET_AUTH_PLUGINS_HACKERGPT_V2) {
      return new Response('Unauthorized', {
        status: 401,
        headers: corsHeaders,
      });
    }

    let { messages, model, max_tokens, temperature, stream, toolId } =
      (await req.json()) as ChatBody;

    let answerMessage: Message = { role: 'user', content: '' };

    // `||` is deliberate for max_tokens: 0 is not a usable budget, so any
    // falsy value falls back to the default.
    max_tokens = max_tokens || 1000;
    // FIX: was `stream = stream || true`, which evaluates to `true` for every
    // input and silently ignored an explicit `stream: false` from the caller.
    // `??` applies the default only when the field is null/undefined.
    stream = stream ?? true;

    const defaultTemperature = process.env.HACKERGPT_MODEL_TEMPERATURE
      ? parseFloat(process.env.HACKERGPT_MODEL_TEMPERATURE)
      : 0.4;
    temperature = temperature ?? defaultTemperature;

    const tokenLimit = getTokenLimit(model);

    if (!tokenLimit) {
      return new Response('Error: Model not found', {
        status: 400,
        headers: corsHeaders,
      });
    }

    // Tokens held back for the model's own completion when deciding how much
    // history fits in the context window.
    let reservedTokens = 2000;

    const MIN_LAST_MESSAGE_LENGTH = parseInt(
      process.env.MIN_LAST_MESSAGE_LENGTH || '50',
      10,
    );
    const MAX_LAST_MESSAGE_LENGTH = parseInt(
      process.env.MAX_LAST_MESSAGE_LENGTH || '1000',
      10,
    );

    const lastMessageContent = messages[messages.length - 1].content;

    // Unusually short or long prompts on the instruct model reserve extra
    // completion room — presumably tuned empirically; confirm the intent.
    if (
      model === ModelType.GPT35TurboInstruct &&
      (lastMessageContent.length < MIN_LAST_MESSAGE_LENGTH ||
        lastMessageContent.length > MAX_LAST_MESSAGE_LENGTH)
    ) {
      reservedTokens = 3500;
    }

    // Initialize the WASM tiktoken encoder (the edge runtime has no native
    // bindings).
    await init((imports) => WebAssembly.instantiate(wasm, imports));
    const encoding = new Tiktoken(
      tiktokenModel.bpe_ranks,
      tiktokenModel.special_tokens,
      tiktokenModel.pat_str,
    );

    const promptToSend = () => {
      return process.env.SECRET_OPENAI_SYSTEM_PROMPT || null;
    };

    const prompt_tokens = encoding.encode(promptToSend()!);
    let tokenCount = prompt_tokens.length;

    const lastMessage = messages[messages.length - 1];
    const lastMessageTokens = encoding.encode(lastMessage.content);

    // NOTE(review): this error is returned without an explicit status, so it
    // goes out as 200 — confirm clients detect it by body text before
    // changing it to a 4xx.
    if (lastMessageTokens.length + reservedTokens > tokenLimit) {
      const errorMessage = `This message exceeds the model's maximum token limit of ${tokenLimit}. Please shorten your message.`;
      return new Response(errorMessage, { headers: corsHeaders });
    }

    tokenCount += lastMessageTokens.length;

    let messagesToSend: Message[] = [lastMessage];

    // Walk older history newest-first, prepending messages while they still
    // fit in the budget. (The loop starts at length - 2, so the last message
    // is never revisited — the original's `i !== messages.length - 1` guard
    // was always true and has been removed.)
    for (let i = messages.length - 2; i >= 0; i--) {
      const message = messages[i];
      const tokens = encoding.encode(message.content);

      if (tokenCount + tokens.length + reservedTokens <= tokenLimit) {
        tokenCount += tokens.length;
        messagesToSend.unshift(message);
      } else {
        break;
      }
    }

    // For web search the user's raw question is replaced below by the
    // synthesized answer prompt, so drop it from the outgoing history.
    if (toolId === ToolID.WEBSEARCH && lastMessage.role === 'user') {
      messagesToSend.pop();
    }

    // NOTE(review): hard-coded to the truthy string 'TRUE', so the Firebase
    // status check below is currently dead code — presumably disabled on
    // purpose for the plugin integration; confirm before deleting the block.
    const skipFirebaseStatusCheck = 'TRUE';

    let userStatusOk = true;

    if (!skipFirebaseStatusCheck) {
      const response = await fetch(
        `${process.env.SECRET_CHECK_USER_STATUS_FIREBASE_FUNCTION_URL}`,
        {
          method: 'POST',
          headers: {
            Authorization: `${authToken}`,
            'Content-Type': 'application/json',
          },
          body: JSON.stringify({
            model: model,
          }),
        },
      );

      userStatusOk = response.ok;

      if (!response.ok) {
        const errorText = await response.text();
        return new Response(errorText, { headers: corsHeaders });
      }
    }

    // Web-search plugin: fetch Google results for the user's query and build
    // an answer prompt grounded in the collected sources.
    if (userStatusOk && toolId === ToolID.WEBSEARCH) {
      if (!useWebBrowsingPlugin) {
        return new Response(
          'The Web Browsing Plugin is disabled. To enable it, please configure the necessary environment variables.',
          { status: 200, headers: corsHeaders },
        );
      }

      const query = lastMessage.content.trim();
      const googleData = await fetchGoogleSearchResults(query);
      const sourceTexts = await processGoogleResults(
        googleData,
        tokenLimit,
        tokenCount,
      );

      const answerPrompt = createAnswerPromptGoogle(query, sourceTexts);
      answerMessage.content = answerPrompt;
    }

    // Release the WASM encoder's memory; token counting is done.
    encoding.free();

    if (userStatusOk) {
      let invokedByToolId = false;

      // Slash-command path: "/toolname ..." messages dispatch directly to a
      // tool handler.
      if (lastMessage.content.startsWith('/')) {
        if (isToolsCommand(lastMessage.content)) {
          return new Response(displayToolsHelpGuide(toolUrls), {
            status: 200,
            headers: corsHeaders,
          });
        }

        const tools = Object.keys(toolUrls);
        for (const tool of tools) {
          if (isCommand(tool.toLowerCase(), lastMessage.content)) {
            // Every tool except the whitelist below is gated behind GPT-4.
            if (
              model !== ModelType.GPT4 &&
              tool.toLowerCase() !== 'tools' &&
              tool.toLowerCase() !== 'subfinder' &&
              tool.toLowerCase() !== 'alterx' &&
              tool.toLowerCase() !== 'gau'
            ) {
              const toolUrl = toolUrls[tool];
              return new Response(
                `You can access [${tool}](${toolUrl}) only with GPT-4.`,
                { status: 200, headers: corsHeaders },
              );
            }
            return await handleCommand(
              tool.toLowerCase(),
              lastMessage,
              model,
              messagesToSend,
              answerMessage,
              authToken,
            );
          }
        }
      } else if (toolId && toolIdToHandlerMapping.hasOwnProperty(toolId)) {
        // Explicit toolId path: the plugin selected a tool without a slash
        // command; per-tool enablement comes from ENABLE_<TOOL>_FEATURE env.
        invokedByToolId = true;

        const toolHandler = toolIdToHandlerMapping[toolId];
        const response = await toolHandler(
          lastMessage,
          corsHeaders,
          process.env[`ENABLE_${toolId.toUpperCase()}_FEATURE`] === 'TRUE',
          OpenAIStream,
          model,
          messagesToSend,
          answerMessage,
          authToken,
          invokedByToolId,
        );

        return response;
      }

      // NOTE(review): `streamResult` is never assigned, so this fall-through
      // path returns an empty 200 body — presumably the plain (non-tool)
      // model call is handled elsewhere, or this is unfinished; confirm.
      let streamResult;

      return new Response(streamResult, {
        headers: corsHeaders,
      });
    } else {
      return new Response('An unexpected error occurred', {
        status: 500,
        headers: corsHeaders,
      });
    }
  } catch (error) {
    console.error('An error occurred:', error);
    if (error instanceof OpenAIError) {
      return new Response('OpenAI Error', {
        status: 500,
        statusText: error.message,
        headers: corsHeaders,
      });
    } else {
      return new Response('Internal Server Error', {
        status: 500,
        headers: corsHeaders,
      });
    }
  }
};

export default handler;
8 changes: 5 additions & 3 deletions pages/api/chat/plugins/alterx/alterx.content.ts
Original file line number Diff line number Diff line change
Expand Up @@ -213,10 +213,12 @@ export async function handleAlterxRequest(
return new Response(params.error, { status: 200, headers: corsHeaders });
}

const rateLimitCheck = await checkToolRateLimit(authToken, toolId);
if (authToken !== process.env.SECRET_AUTH_PLUGINS_HACKERGPT_V2) {
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);

if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
}
}

let alterxUrl = `${process.env.SECRET_GKE_PLUGINS_BASE_URL}/api/chat/plugins/alterx?`;
Expand Down
8 changes: 5 additions & 3 deletions pages/api/chat/plugins/cyberchef/cyberchef.content.ts
Original file line number Diff line number Diff line change
Expand Up @@ -222,10 +222,12 @@ export async function handleCyberChefRequest(
}

const toolId = 'cyberchef';
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);
if (authToken !== process.env.SECRET_AUTH_PLUGINS_HACKERGPT_V2) {
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);

if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
}
}

let cyberchefUrl = `${process.env.SECRET_CYBERCHEF_BASE_URL}`;
Expand Down
8 changes: 5 additions & 3 deletions pages/api/chat/plugins/gau/gau.content.ts
Original file line number Diff line number Diff line change
Expand Up @@ -271,10 +271,12 @@ export async function handleGauRequest(
return new Response(params.error, { status: 200, headers: corsHeaders });
}

const rateLimitCheck = await checkToolRateLimit(authToken, toolId);
if (authToken !== process.env.SECRET_AUTH_PLUGINS_HACKERGPT_V2) {
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);

if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
}
}

let gauUrl = `${process.env.SECRET_GKE_PLUGINS_BASE_URL}/api/chat/plugins/gau?`;
Expand Down
8 changes: 5 additions & 3 deletions pages/api/chat/plugins/httpx/httpx.content.ts
Original file line number Diff line number Diff line change
Expand Up @@ -769,10 +769,12 @@ export async function handleHttpxRequest(
}

const toolId = 'httpx';
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);
if (authToken !== process.env.SECRET_AUTH_PLUGINS_HACKERGPT_V2) {
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);

if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
}
}

let httpxUrl = `${process.env.SECRET_GKE_PLUGINS_BASE_URL}/api/chat/plugins/httpx`;
Expand Down
8 changes: 5 additions & 3 deletions pages/api/chat/plugins/katana/katana.content.ts
Original file line number Diff line number Diff line change
Expand Up @@ -427,10 +427,12 @@ export async function handleKatanaRequest(
}

const toolId = 'katana';
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);
if (authToken !== process.env.SECRET_AUTH_PLUGINS_HACKERGPT_V2) {
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);

if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
}
}

let katanaUrl = `${process.env.SECRET_GKE_PLUGINS_BASE_URL}/api/chat/plugins/katana`;
Expand Down
8 changes: 5 additions & 3 deletions pages/api/chat/plugins/naabu/naabu.content.ts
Original file line number Diff line number Diff line change
Expand Up @@ -356,10 +356,12 @@ export async function handleNaabuRequest(
}

const toolId = 'naabu';
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);
if (authToken !== process.env.SECRET_AUTH_PLUGINS_HACKERGPT_V2) {
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);

if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
}
}

let naabuUrl = `${process.env.SECRET_GKE_PLUGINS_BASE_URL}/api/chat/plugins/naabu`;
Expand Down
8 changes: 5 additions & 3 deletions pages/api/chat/plugins/nuclei/nuclei.content.ts
Original file line number Diff line number Diff line change
Expand Up @@ -855,10 +855,12 @@ export async function handleNucleiRequest(
}

const toolId = 'nuclei';
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);
if (authToken !== process.env.SECRET_AUTH_PLUGINS_HACKERGPT_V2) {
const rateLimitCheck = await checkToolRateLimit(authToken, toolId);

if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
if (rateLimitCheck.isRateLimited) {
return rateLimitCheck.response;
}
}

let nucleiUrl = `${process.env.SECRET_GKE_PLUGINS_BASE_URL}/api/chat/plugins/nuclei`;
Expand Down
Loading

0 comments on commit 6642984

Please sign in to comment.