Skip to content

Commit

Permalink
adding streams-ai-agent
Browse files Browse the repository at this point in the history
  • Loading branch information
Sahilsen committed Jan 10, 2025
1 parent c0742d2 commit e5c716d
Show file tree
Hide file tree
Showing 5 changed files with 204 additions and 0 deletions.
3 changes: 3 additions & 0 deletions Streams/ai-bot-discord/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
DISCORD_TOKEN=your_discord_bot_token
OPENAI_API_KEY=openai_api_key
DATABASE_URL=postgres_sql_database_url
30 changes: 30 additions & 0 deletions Streams/ai-bot-discord/actions/getMetrics.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
const { Pool } = require('pg');

// Initialize PostgreSQL connection pool, configured via DATABASE_URL
// (see .env.example). Shared by the exported query helper below.
// NOTE(review): rejectUnauthorized: false disables TLS certificate
// verification (accepts self-signed/invalid certs). Common for some managed
// Postgres hosts, but confirm this is intended for production traffic.
const pool = new Pool({
connectionString: process.env.DATABASE_URL,
ssl: {
rejectUnauthorized: false, // Allow self-signed certificates
},
});

module.exports = async (blockNumber) => {
const query = `
SELECT data
FROM "block-metrics"
WHERE (data->>'blockNumber')::BIGINT = $1
`;

try {
const result = await pool.query(query, [blockNumber]);

if (result.rows.length > 0) {
return result.rows[0].data; // Data is already parsed JSON
} else {
return null;
}
} catch (err) {
console.error("Database query error:", err.message);
throw new Error("Failed to fetch block metrics.");
}
};
46 changes: 46 additions & 0 deletions Streams/ai-bot-discord/actions/openaiHelper.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
const axios = require('axios');

const openaiEndpoint = 'https://api.openai.com/v1/chat/completions';

module.exports = async (prompt) => {
const MAX_RETRIES = 5; // Maximum retries for handling rate limits
let retryDelay = 1000; // Initial retry delay (in milliseconds)

for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
try {
const response = await axios.post(
openaiEndpoint,
{
model: "gpt-4", // Use GPT-4 for higher quality responses
messages: [{ role: "user", content: prompt }],
max_tokens: 200, // Limit the response length to 200 tokens
},
{
headers: {
Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
"Content-Type": "application/json",
},
}
);

// Log only rate limit details
console.log("Rate Limit Details:", {
remainingRequests: response.headers['x-ratelimit-remaining-requests'],
resetTime: response.headers['x-ratelimit-reset-requests'],
});

return response.data.choices[0].message.content.trim();
} catch (err) {
if (err.response?.status === 429) {
console.warn(`Rate limit hit. Retrying in ${retryDelay / 1000}s... (Attempt ${attempt}/${MAX_RETRIES})`);
await new Promise((resolve) => setTimeout(resolve, retryDelay));
retryDelay *= 2; // Increase delay exponentially
} else {
console.error("OpenAI API Error:", err.response?.data || err.message);
throw new Error("Failed to generate response.");
}
}
}

return "I'm currently experiencing high demand and cannot process your request. Please try again later.";
};
107 changes: 107 additions & 0 deletions Streams/ai-bot-discord/main.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
require('dotenv').config();
const { Client, GatewayIntentBits } = require('discord.js');
const getMetrics = require('./actions/getMetrics');
const openaiHelper = require('./actions/openaiHelper');

// Discord client with the minimum gateway intents this bot needs: guild and
// guild-message events, plus MessageContent so message text is readable.
const client = new Client({
intents: [
GatewayIntentBits.Guilds,
GatewayIntentBits.GuildMessages,
GatewayIntentBits.MessageContent, // Required to read message content
],
});

// Thread-specific context storage
// Maps Discord thread id -> { blockNumber, blockData }, so follow-up
// questions inside a thread can be answered without re-querying the DB.
// NOTE(review): entries are only ever added, never deleted, so this grows
// unboundedly over the process lifetime -- consider evicting on archive.
const THREAD_CONTEXT = new Map();

// One-time startup log once the gateway connection is established.
client.once('ready', () => {
console.log(`Logged in as ${client.user.tag}`);
});

// Build the Michael Scott persona prompt for a block query. Shared by the
// initial-query path and the thread follow-up path (previously duplicated
// verbatim in both branches).
//
// @param {number} blockNumber - block the conversation is about.
// @param {object} blockData - stored metrics for that block.
// @param {string} userContent - the raw user message text.
// @returns {string} the full prompt to send to the OpenAI helper.
function buildPrompt(blockNumber, blockData, userContent) {
  // Only surface the timestamp when the user's wording asks about time.
  const includeTimestamp = /time|date|when|confirmed|timestamp/i.test(userContent);
  return `You are Michael Scott, the quirky and often inappropriate boss from The Office.
You are answering questions about Ethereum block ${blockNumber}.
Here is the known data for block ${blockNumber}:
${JSON.stringify(blockData, null, 2)}
User's query: "${userContent}"
Respond as Michael Scott would: provide an accurate answer first, and then add a humorous remark in Michael's style.
${includeTimestamp ? `Mention the block timestamp (${blockData.blockTimestamp}) as part of the response.` : 'Do not mention the block timestamp unless explicitly asked.'}
Keep your response under 150 tokens.`;
}

// Main message router: thread messages are follow-ups answered from stored
// context; channel messages either start a new block query (in a thread) or
// get a prompt asking for a block number.
client.on('messageCreate', async (message) => {
  if (message.author.bot) return; // Ignore bot messages

  // Handle follow-up queries in threads
  if (message.channel.isThread()) {
    const context = THREAD_CONTEXT.get(message.channel.id);

    if (!context) {
      // Awaited so a send failure is surfaced instead of floating.
      await message.channel.send("This thread has no active context. Please start a new query.");
      return;
    }

    try {
      const response = await openaiHelper(buildPrompt(context.blockNumber, context.blockData, message.content));
      await message.reply(response);
    } catch (error) {
      console.error("OpenAI Error:", error.message);
      await message.reply("Uh-oh, looks like something went wrong. Classic Michael mistake!");
    }
    return;
  }

  // New query: expects "block <number>" (optionally "block number <number>").
  const blockNumberMatch = message.content.match(/block(?:\s*number)?\s*(\d+)/i);

  if (blockNumberMatch) {
    const blockNumber = parseInt(blockNumberMatch[1], 10);

    try {
      const blockData = await getMetrics(blockNumber);

      if (!blockData) {
        await message.channel.send(`No data found for block ${blockNumber}. That's what she said!`);
        return;
      }

      // Answer in a dedicated thread and remember its context for follow-ups.
      const thread = await message.startThread({
        name: `Block ${blockNumber} Query`,
        autoArchiveDuration: 60,
      });

      THREAD_CONTEXT.set(thread.id, { blockNumber, blockData });

      const response = await openaiHelper(buildPrompt(blockNumber, blockData, message.content));
      await thread.send(response);
    } catch (error) {
      console.error("Error:", error.message);
      await message.channel.send(`I couldn't process your query for block ${blockNumber}.`);
    }
  } else {
    // No block number found: send a random joke, then a concrete ask.
    const funnyResponses = [
      "I'm sorry, I can't read your mind. Do you know how many times I've been asked to do that in the office? Just give me a block number!",
      "This feels like a setup for 'that's what she said.' Anyway, I need a block number to work with.",
      "No block number? That’s okay, I’ll just sit here awkwardly until you give me one.",
      "Imagine I'm your assistant... but I need details. Which block are we talking about?",
      "You’re lucky I’m not Dwight, or I’d make you fill out a block request form. Just give me the number!"
    ];
    const followUp = "Could you please specify the block number you'd like to know about?";

    await message.channel.send(funnyResponses[Math.floor(Math.random() * funnyResponses.length)]);
    setTimeout(() => {
      // Fire-and-forget by design; an explicit handler prevents an
      // unhandled rejection if the channel becomes unavailable.
      message.channel.send(followUp).catch((err) => console.error("Follow-up send failed:", err.message));
    }, 2000);
  }
});

// Login to Discord
// Requires DISCORD_TOKEN in the environment (see .env.example).
client.login(process.env.DISCORD_TOKEN);
18 changes: 18 additions & 0 deletions Streams/ai-bot-discord/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
{
"name": "block-metrics-bot",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"dependencies": {
"discord.js": "^14.17.3",
"dotenv": "^16.4.7",
"openai": "^4.78.0",
"pg": "^8.13.1"
}
}

0 comments on commit e5c716d

Please sign in to comment.