diff --git a/README.md b/README.md
index d4140ac..711e32f 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@ Your texting data never leaves your computer. We are proudly open-source for thi
- 🔍 filter by a word, friend, or time range
- 💯 sentiment analysis
- 🎁 "Your Year in Text" experience a.k.a. iMessage Wrapped
+- 💬 Chat with an LLM to query your data
## Download Left on Read for Mac
diff --git a/app/README.md b/app/README.md
index 58aabb0..a9357bf 100644
--- a/app/README.md
+++ b/app/README.md
@@ -116,9 +116,25 @@ You most likely have an old electron process running. If this is warning, you ca
If you are getting a "Cannot find module" error, you likely forgot to install the packages. Be sure to run `yarn` in the `app/` directory.
-
## Support
+
+## LLM Chat (first go)
+
+How it works:
+
+1. The user inputs a natural-language query (e.g., "How many texts did I send to Mom?")
+2. This is passed to an LLM, which turns it into a SQL query to run on core_main_table. TODO: give the LLM more of the DB schema.
+3. The generated query is executed, and the query result plus the original message are synthesized by the LLM into a readable response for the user.
+
+Most of the backend logic happens in app/src/analysis/queries/RagEngine.ts
+
+Notes:
+
+- Currently does not account for messages in group chat vs. not in a group chat, so numbers are misleading.
+- You must use the exact spelling of your contact's name as it appears in the address book.
+- Requires an OpenAI API key and internet access.
+- Not all errors are handled gracefully — e.g., internet connectivity failures and malformed SQL generated by the LLM are not, though some (such as an invalid API key) are.
diff --git a/app/package.json b/app/package.json
index c485fea..6236091 100644
--- a/app/package.json
+++ b/app/package.json
@@ -142,6 +142,7 @@
"node-machine-id": "^1.1.12",
"node-schedule": "^2.1.0",
"nodemailer": "^6.7.7",
+ "openai": "^4.28.4",
"path-browserify": "^1.0.1",
"react": "^18.1.0",
"react-chartjs-2": "^4.3.1",
diff --git a/app/src/analysis/queries/RagEngine.ts b/app/src/analysis/queries/RagEngine.ts
new file mode 100644
index 0000000..fd2ad7a
--- /dev/null
+++ b/app/src/analysis/queries/RagEngine.ts
@@ -0,0 +1,116 @@
+import OpenAI from 'openai';
+import * as sqlite3 from 'sqlite3';
+
+import * as sqlite3Wrapper from '../../utils/sqliteWrapper';
+
+// A test function for me to understand the altered DB schema better :)
+export async function printDBTableNames(
+ db: sqlite3.Database
+): Promise {
+ const q = `
+ SELECT
+ name
+ FROM
+ sqlite_master
+ WHERE
+ type='table'
+ ORDER BY
+ name
+ `;
+ return sqlite3Wrapper.allP(db, q);
+}
+
+// Put together a really hacky RAG pipeline:
+//   1. ask the LLM to translate the user's message into SQL over core_main_table,
+//   2. run that SQL against the local DB (falling back to a canned COUNT query),
+//   3. ask the LLM to phrase the raw query result as a plain-English answer.
+// All failures resolve to a user-facing string rather than rejecting, so the
+// renderer can display the result directly.
+export async function queryRAGEngine(
+  db: sqlite3.Database,
+  message: string,
+  key: string
+): Promise<string> {
+  const openai = new OpenAI({
+    apiKey: key,
+  });
+
+  // SQL produced by the LLM (stays null if the completion fails or is empty).
+  let q: string | null = null;
+
+  let prompt = `
+  Write a query for a table called core_main_table with this schema:
+  contact_name,
+  text (which has the message's text),
+  date (a unix timestamp number in nanoseconds when the message was sent),
+  is_from_me (a boolean indicating if I was the sender of the message)
+
+  to answer the following query: ${message}
+  Please respond with only the raw unformatted SQL and no other text. If this is not possible, or it's hard to get a concrete result based on the schema, return 'Not Possible'
+  `;
+
+  try {
+    const response = await openai.chat.completions.create({
+      model: 'gpt-3.5-turbo', // Change the model as per your requirement
+      messages: [{ role: 'system', content: prompt }],
+      temperature: 0.7,
+      max_tokens: 150,
+    });
+    q = response.choices[0].message.content;
+    console.log(response.choices[0]);
+  } catch (error) {
+    console.error(error);
+    // An async function wraps its return value in a promise automatically;
+    // no `new Promise` constructor needed.
+    return 'An error occurred. Check your API key and try a new message.';
+  }
+
+  // Fallback query when the LLM produced no SQL. NOTE(review): `message` is
+  // interpolated directly into SQL (injection-prone); tolerable only because
+  // the DB is local, but consider parameterizing.
+  const query = `
+  SELECT COUNT(*) AS message_count
+  FROM core_main_table
+  WHERE LOWER(contact_name) = LOWER('${message}');
+  `;
+
+  // Track the SQL we actually ran so the interpretation prompt below matches
+  // reality (previously it always showed the fallback `query`, even when the
+  // LLM-generated `q` was the one executed).
+  const executedQuery = q ?? query;
+  const queryResult = await sqlite3Wrapper.allP(db, executedQuery);
+
+  // Narrow an arbitrary value to a plain object (not null/array/Date).
+  function isObject(value: any): value is Record<string, unknown> {
+    return (
+      typeof value === 'object' &&
+      value !== null &&
+      !Array.isArray(value) &&
+      !(value instanceof Date)
+    );
+  }
+  if (!isObject(queryResult[0])) {
+    console.log(queryResult[0]);
+  }
+  const resultString = JSON.stringify(queryResult[0]);
+  // Sanity check so you don't accidentally use too many tokens. Also guards
+  // the empty-result case: JSON.stringify(undefined) returns undefined, and
+  // calling .length on it would throw.
+  if (resultString === undefined || resultString.length > 10000) {
+    return 'An error occurred. Try a new message.';
+  }
+
+  prompt = `
+  Given this message from a user: ${message},
+  this corresponding generated query over a database: ${executedQuery},
+  and this result of the query ${resultString}:
+  interpret the result of the query in plain english as a response to the initial message.
+  `;
+
+  let result = '';
+  try {
+    const response = await openai.chat.completions.create({
+      model: 'gpt-3.5-turbo', // Change the model as per your requirement
+      messages: [{ role: 'system', content: prompt }],
+      temperature: 0.7,
+      max_tokens: 150,
+    });
+    result = response.choices[0].message.content ?? 'An error occurred';
+    console.log(response.choices[0]);
+  } catch (error) {
+    console.error(error);
+    return 'An error occurred. Check your API key and try a new message.';
+  }
+
+  return result;
+}
diff --git a/app/src/components/Dashboard/ChatInterface.tsx b/app/src/components/Dashboard/ChatInterface.tsx
new file mode 100644
index 0000000..d764d5c
--- /dev/null
+++ b/app/src/components/Dashboard/ChatInterface.tsx
@@ -0,0 +1,114 @@
+import { Box, Button, Flex, Input, Text } from '@chakra-ui/react';
+import { ipcRenderer } from 'electron';
+import React, { useEffect, useRef, useState } from 'react';
+
+// One chat bubble in the conversation history.
+interface Message {
+  text: string;
+  // Who produced the bubble: the local user or the LLM-backed bot.
+  sender: 'user' | 'bot';
+}
+
+// Greeting shown before the user has sent anything.
+const initialBotMessage: Message = {
+  text: 'Hi there :) You can ask me questions here about your iMessages! For example, try "Who is my best friend?"',
+  sender: 'bot',
+};
+
+// Props for ChatInterface; the OpenAI key is collected by the parent page
+// and forwarded to the main process with each query.
+interface ChatInterfaceProps {
+  openAIKey: string;
+}
+
+// Chat UI: renders the message history and an input box, and round-trips each
+// user message through the main process ('rag-engine' IPC channel) to get the
+// LLM-generated answer.
+export function ChatInterface(props: ChatInterfaceProps) {
+  const { openAIKey } = props;
+
+  // Conversation history, seeded with the bot's greeting.
+  const [messages, setMessages] = useState([initialBotMessage]);
+  const [newMessage, setNewMessage] = useState('');
+
+  // True while an IPC round-trip is in flight.
+  const [awaitingResponse, setAwaitingResponse] = useState(false);
+
+  // Scroll container for the message list (auto-scrolled below).
+  const messagesContainerRef = useRef(null);
+
+  const handleMessageChange = (e: React.ChangeEvent) => {
+    setNewMessage(e.target.value);
+  };
+
+  // Append the user's message, then append the bot's reply when it arrives.
+  // Whitespace-only input is ignored.
+  const handleSendMessage = async () => {
+    if (newMessage.trim()) {
+      setMessages([...messages, { text: newMessage, sender: 'user' }]);
+      setNewMessage('');
+      setAwaitingResponse(true);
+
+      // Main process runs the RAG pipeline and resolves to a display string.
+      const llmResponse: string = await ipcRenderer.invoke(
+        'rag-engine',
+        newMessage,
+        openAIKey
+      );
+
+      // Functional update: `messages` captured above may be stale by now.
+      setMessages((prevMessages) => [
+        ...prevMessages,
+        { text: llmResponse, sender: 'bot' },
+      ]);
+      setAwaitingResponse(false);
+    }
+  };
+
+  // Keep the newest message visible by pinning scroll to the bottom.
+  useEffect(() => {
+    if (messagesContainerRef.current) {
+      messagesContainerRef.current.scrollTop =
+        messagesContainerRef.current.scrollHeight;
+    }
+  }, [messages]);
+
+  // NOTE(review): the JSX markup below appears to have been stripped from
+  // this patch (element tags are missing); only text/expression children
+  // remain. Recover the full markup from the committed file before editing.
+  return (
+
+
+      {messages.map((message, index) => (
+
+
+            {message.text}
+
+
+      ))}
+
+
+
+
+
+
+  );
+}
+
+export default ChatInterface;
diff --git a/app/src/components/Dashboard/ChatPage.tsx b/app/src/components/Dashboard/ChatPage.tsx
new file mode 100644
index 0000000..183fe3f
--- /dev/null
+++ b/app/src/components/Dashboard/ChatPage.tsx
@@ -0,0 +1,240 @@
+import {
+ AlertDialog,
+ AlertDialogBody,
+ AlertDialogContent,
+ AlertDialogFooter,
+ AlertDialogHeader,
+ AlertDialogOverlay,
+ Box,
+ Button,
+ Icon,
+ Input,
+ Text,
+ theme as defaultTheme,
+} from '@chakra-ui/react';
+import { ipcRenderer } from 'electron';
+import log from 'electron-log';
+import { useEffect, useRef, useState } from 'react';
+import { FiAlertCircle } from 'react-icons/fi';
+import { IoChatbubblesOutline } from 'react-icons/io5';
+
+import { logEvent } from '../../utils/analytics';
+import { Footer } from '../Footer';
+import { ChatInterface } from './ChatInterface';
+
+export function ChatPage({ onRefresh }: { onRefresh: () => void }) {
+ const [doesRequireRefresh, setDoesRequireRefresh] = useState(false);
+ const [showUpdateAvailable, setShowUpdateAvailable] =
+ useState(false);
+
+ const [openAIKey, setOpenAIKey] = useState('');
+
+ const handleKeyChange = (e: React.ChangeEvent) => {
+ setOpenAIKey(e.target.value);
+ };
+
+ const cancelRef = useRef();
+
+ useEffect(() => {
+ const checkRequiresRefresh = async () => {
+ let requiresRefresh = false;
+ try {
+ requiresRefresh = await ipcRenderer.invoke('check-requires-refresh');
+ } catch (e) {
+ log.error(e);
+ }
+
+ if (requiresRefresh) {
+ setDoesRequireRefresh(requiresRefresh);
+ }
+ };
+
+ checkRequiresRefresh();
+ }, []);
+
+ useEffect(() => {
+ logEvent({
+ eventName: 'LOADED_DASHBOARD',
+ });
+ }, []);
+
+ useEffect(() => {
+ ipcRenderer.send('listen-to-updates');
+
+ ipcRenderer.on('update-available', () => {
+ setShowUpdateAvailable(true);
+ });
+ }, []);
+
+ return (
+
+
+
+
+
+
+
+
+
+ LLM Chat
+
+
+
+
+
+ Ask a chatbot questions about your iMessages
+
+