From e5c716d9fdcee4956a83eab323840357b7673a22 Mon Sep 17 00:00:00 2001
From: Sahil sen
Date: Sat, 11 Jan 2025 01:45:47 +0530
Subject: [PATCH] adding-streams-ai-agent

---
 Streams/ai-bot-discord/.env.example            |   3 +
 Streams/ai-bot-discord/actions/getMetrics.js   |  30 +++++
 .../ai-bot-discord/actions/openaiHelper.js     |  46 ++++++++
 Streams/ai-bot-discord/main.js                 | 107 ++++++++++++++++++
 Streams/ai-bot-discord/package.json            |  18 +++
 5 files changed, 204 insertions(+)
 create mode 100644 Streams/ai-bot-discord/.env.example
 create mode 100644 Streams/ai-bot-discord/actions/getMetrics.js
 create mode 100644 Streams/ai-bot-discord/actions/openaiHelper.js
 create mode 100644 Streams/ai-bot-discord/main.js
 create mode 100644 Streams/ai-bot-discord/package.json

diff --git a/Streams/ai-bot-discord/.env.example b/Streams/ai-bot-discord/.env.example
new file mode 100644
index 0000000..ff523c7
--- /dev/null
+++ b/Streams/ai-bot-discord/.env.example
@@ -0,0 +1,3 @@
+DISCORD_TOKEN=your_discord_bot_token
+OPENAI_API_KEY=openai_api_key
+DATABASE_URL=postgres_sql_database_url
\ No newline at end of file
diff --git a/Streams/ai-bot-discord/actions/getMetrics.js b/Streams/ai-bot-discord/actions/getMetrics.js
new file mode 100644
index 0000000..7bbb471
--- /dev/null
+++ b/Streams/ai-bot-discord/actions/getMetrics.js
@@ -0,0 +1,30 @@
+const { Pool } = require('pg');
+
+// Initialize PostgreSQL connection
+const pool = new Pool({
+  connectionString: process.env.DATABASE_URL,
+  ssl: {
+    rejectUnauthorized: false, // Allow self-signed certificates
+  },
+});
+
+module.exports = async (blockNumber) => {
+  const query = `
+    SELECT data
+    FROM "block-metrics"
+    WHERE (data->>'blockNumber')::BIGINT = $1
+  `;
+
+  try {
+    const result = await pool.query(query, [blockNumber]);
+
+    if (result.rows.length > 0) {
+      return result.rows[0].data; // Data is already parsed JSON
+    } else {
+      return null;
+    }
+  } catch (err) {
+    console.error("Database query error:", err.message);
+    throw new Error("Failed to fetch block metrics.");
+  }
+};
\ No newline at end of file
diff --git a/Streams/ai-bot-discord/actions/openaiHelper.js b/Streams/ai-bot-discord/actions/openaiHelper.js
new file mode 100644
index 0000000..c36d424
--- /dev/null
+++ b/Streams/ai-bot-discord/actions/openaiHelper.js
@@ -0,0 +1,46 @@
+const axios = require('axios');
+
+const openaiEndpoint = 'https://api.openai.com/v1/chat/completions';
+
+module.exports = async (prompt) => {
+  const MAX_RETRIES = 5; // Maximum retries for handling rate limits
+  let retryDelay = 1000; // Initial retry delay (in milliseconds)
+
+  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
+    try {
+      const response = await axios.post(
+        openaiEndpoint,
+        {
+          model: "gpt-4", // Use GPT-4 for higher quality responses
+          messages: [{ role: "user", content: prompt }],
+          max_tokens: 200, // Limit the response length to 200 tokens
+        },
+        {
+          headers: {
+            Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
+            "Content-Type": "application/json",
+          },
+        }
+      );
+
+      // Log only rate limit details
+      console.log("Rate Limit Details:", {
+        remainingRequests: response.headers['x-ratelimit-remaining-requests'],
+        resetTime: response.headers['x-ratelimit-reset-requests'],
+      });
+
+      return response.data.choices[0].message.content.trim();
+    } catch (err) {
+      if (err.response?.status === 429) {
+        console.warn(`Rate limit hit. Retrying in ${retryDelay / 1000}s... (Attempt ${attempt}/${MAX_RETRIES})`);
+        await new Promise((resolve) => setTimeout(resolve, retryDelay));
+        retryDelay *= 2; // Increase delay exponentially
+      } else {
+        console.error("OpenAI API Error:", err.response?.data || err.message);
+        throw new Error("Failed to generate response.");
+      }
+    }
+  }
+
+  return "I'm currently experiencing high demand and cannot process your request. Please try again later.";
+};
diff --git a/Streams/ai-bot-discord/main.js b/Streams/ai-bot-discord/main.js
new file mode 100644
index 0000000..6e4e0ef
--- /dev/null
+++ b/Streams/ai-bot-discord/main.js
@@ -0,0 +1,107 @@
+require('dotenv').config();
+const { Client, GatewayIntentBits } = require('discord.js');
+const getMetrics = require('./actions/getMetrics');
+const openaiHelper = require('./actions/openaiHelper');
+
+const client = new Client({
+  intents: [
+    GatewayIntentBits.Guilds,
+    GatewayIntentBits.GuildMessages,
+    GatewayIntentBits.MessageContent, // Required to read message content
+  ],
+});
+
+// Thread-specific context storage
+const THREAD_CONTEXT = new Map();
+
+client.once('ready', () => {
+  console.log(`Logged in as ${client.user.tag}`);
+});
+
+client.on('messageCreate', async (message) => {
+  if (message.author.bot) return; // Ignore bot messages
+
+  // Handle follow-up queries in threads
+  if (message.channel.isThread()) {
+    const context = THREAD_CONTEXT.get(message.channel.id);
+
+    if (!context) {
+      message.channel.send("This thread has no active context. Please start a new query.");
+      return;
+    }
+
+    const includeTimestamp = /time|date|when|confirmed|timestamp/i.test(message.content);
+    const prompt = `You are Michael Scott, the quirky and often inappropriate boss from The Office.
+      You are answering questions about Ethereum block ${context.blockNumber}.
+      Here is the known data for block ${context.blockNumber}:
+      ${JSON.stringify(context.blockData, null, 2)}
+      User's query: "${message.content}"
+      Respond as Michael Scott would: provide an accurate answer first, and then add a humorous remark in Michael's style.
+      ${includeTimestamp ? `Mention the block timestamp (${context.blockData.blockTimestamp}) as part of the response.` : 'Do not mention the block timestamp unless explicitly asked.'}
+      Keep your response under 150 tokens.`;
+
+    try {
+      const response = await openaiHelper(prompt);
+      await message.reply(response);
+    } catch (error) {
+      console.error("OpenAI Error:", error.message);
+      message.reply("Uh-oh, looks like something went wrong. Classic Michael mistake!");
+    }
+    return;
+  }
+
+  const blockNumberMatch = message.content.match(/block(?:\s*number)?\s*(\d+)/i);
+
+  if (blockNumberMatch) {
+    const blockNumber = parseInt(blockNumberMatch[1], 10);
+
+    try {
+      const blockData = await getMetrics(blockNumber);
+
+      if (!blockData) {
+        message.channel.send(`No data found for block ${blockNumber}. That's what she said!`);
+        return;
+      }
+
+      const thread = await message.startThread({
+        name: `Block ${blockNumber} Query`,
+        autoArchiveDuration: 60,
+      });
+
+      THREAD_CONTEXT.set(thread.id, { blockNumber, blockData });
+
+      const includeTimestamp = /time|date|when|confirmed|timestamp/i.test(message.content);
+      const prompt = `You are Michael Scott, the quirky and often inappropriate boss from The Office.
+        You are answering questions about Ethereum block ${blockNumber}.
+        Here is the known data for block ${blockNumber}:
+        ${JSON.stringify(blockData, null, 2)}
+        User's query: "${message.content}"
+        Respond as Michael Scott would: provide an accurate answer first, and then add a humorous remark in Michael's style.
+        ${includeTimestamp ? `Mention the block timestamp (${blockData.blockTimestamp}) as part of the response.` : 'Do not mention the block timestamp unless explicitly asked.'}
+        Keep your response under 150 tokens.`;
+
+      const response = await openaiHelper(prompt);
+      await thread.send(response);
+    } catch (error) {
+      console.error("Error:", error.message);
+      message.channel.send(`I couldn't process your query for block ${blockNumber}.`);
+    }
+  } else {
+    const funnyResponses = [
+      "I'm sorry, I can't read your mind. Do you know how many times I've been asked to do that in the office? Just give me a block number!",
+      "This feels like a setup for 'that's what she said.' Anyway, I need a block number to work with.",
+      "No block number? That’s okay, I’ll just sit here awkwardly until you give me one.",
+      "Imagine I'm your assistant... but I need details. Which block are we talking about?",
+      "You’re lucky I’m not Dwight, or I’d make you fill out a block request form. Just give me the number!"
+    ];
+    const followUp = "Could you please specify the block number you'd like to know about?";
+
+    message.channel.send(funnyResponses[Math.floor(Math.random() * funnyResponses.length)]);
+    setTimeout(() => {
+      message.channel.send(followUp);
+    }, 2000);
+  }
+});
+
+// Login to Discord
+client.login(process.env.DISCORD_TOKEN);
diff --git a/Streams/ai-bot-discord/package.json b/Streams/ai-bot-discord/package.json
new file mode 100644
index 0000000..d72d1a5
--- /dev/null
+++ b/Streams/ai-bot-discord/package.json
@@ -0,0 +1,18 @@
+{
+  "name": "block-metrics-bot",
+  "version": "1.0.0",
+  "description": "",
+  "main": "main.js",
+  "scripts": {
+    "test": "echo \"Error: no test specified\" && exit 1"
+  },
+  "keywords": [],
+  "author": "",
+  "license": "ISC",
+  "dependencies": {
+    "axios": "^1.7.9",
+    "discord.js": "^14.17.3",
+    "dotenv": "^16.4.7",
+    "pg": "^8.13.1"
+  }
+}