refactor: simplify code (part I) (#3)
* refactor: default to ollama llm locally

* refactor: simplify code further

* chore: tauri build ci

* fix: ci

* chore: run ruff linter
cs50victor authored Mar 27, 2024
1 parent 1b9afd2 commit cb8e12d
Showing 19 changed files with 65 additions and 172 deletions.
7 changes: 5 additions & 2 deletions .github/workflows/ci-rs.yml
@@ -5,8 +5,8 @@ on:
     paths:
       - "**/.github/workflows/ci-rs.yml"
       - "**/Cargo.lock"
-      - "**/src/**"
       - "**/Cargo.toml"
+      - "**/src-tauri/**"
       - "**/rust-toolchain"
       - "**/.taplo.toml"
   workflow_dispatch:
@@ -42,8 +42,11 @@ jobs:
           components: rustfmt, clippy
           enable-sccache: "true"

+      - name: Install Tauri
+        run: cargo install tauri
+
       - name: Build
-        run: cargo build --release
+        run: cargo tauri build --release

       - name: Test
         run: cargo test --release
3 changes: 3 additions & 0 deletions .gitignore
@@ -52,3 +52,6 @@ dist-ssr
 *.njsproj
 *.sln
 *.sw?
+
+# python
+.cenv
24 changes: 15 additions & 9 deletions core/source/clients/base_device.py → core/mac_device.py
@@ -4,10 +4,7 @@
 import os
 import asyncio
 import threading
-import os
 import pyaudio
-from starlette.websockets import WebSocket
-from queue import Queue
 from pynput import keyboard
 import json
 import traceback
@@ -23,17 +20,17 @@
 import base64
 from interpreter import interpreter  # Just for code execution. Maybe we should let people do from interpreter.computer import run?
 # In the future, I guess kernel watching code should be elsewhere? Somewhere server / client agnostic?
-from ..server.utils.kernel import put_kernel_messages_into_queue
-from ..server.utils.process_utils import kill_process_tree
+from source.server.utils.kernel import put_kernel_messages_into_queue
+from source.server.utils.process_utils import kill_process_tree

-from ..server.utils.logs import setup_logging
-from ..server.utils.logs import logger
+from source.server.utils.logs import setup_logging
+from source.server.utils.logs import logger
 setup_logging()

 os.environ["STT_RUNNER"] = "server"
 os.environ["TTS_RUNNER"] = "server"

-from ..utils.accumulator import Accumulator
+from source.utils.accumulator import Accumulator

 accumulator = Accumulator()

@@ -324,4 +321,13 @@ async def start_async(self):
     def start(self):
         if os.getenv('TEACH_MODE') != "True":
             asyncio.run(self.start_async())
-            p.terminate()
\ No newline at end of file
+            p.terminate()
+
+device = Device()
+
+def run_device(server_url):
+    device.server_url = server_url
+    device.start()
+
+if __name__ == "__main__":
+    run_device()
Empty file removed core/source/clients/__init__.py
Empty file.
10 changes: 0 additions & 10 deletions core/source/clients/mac/device.py

This file was deleted.

3 changes: 0 additions & 3 deletions core/source/server/conftest.py
@@ -1,8 +1,5 @@
-import os
-import sys
-import pytest
 from source.server.i import configure_interpreter
 from unittest.mock import Mock
 from interpreter import OpenInterpreter
 from fastapi.testclient import TestClient
 from .server import app
2 changes: 0 additions & 2 deletions core/source/server/i.py
@@ -1,11 +1,9 @@
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.

import os
import glob
import time
import json
from pathlib import Path
from interpreter import OpenInterpreter
import shutil

6 changes: 1 addition & 5 deletions core/source/server/server.py
@@ -3,7 +3,6 @@

 import traceback
 from platformdirs import user_data_dir
-import ast
 import json
 import queue
 import os
@@ -13,9 +12,7 @@
 from fastapi import FastAPI, Request
 from fastapi.responses import PlainTextResponse
 from starlette.websockets import WebSocket, WebSocketDisconnect
-from pathlib import Path
 import asyncio
-import urllib.parse
 from .utils.kernel import put_kernel_messages_into_queue
 from .i import configure_interpreter
 from interpreter import interpreter
@@ -352,7 +349,6 @@ def stream_tts(sentence):

 from uvicorn import Config, Server
 import os
-import platform
 from importlib import import_module

 # these will be overwritten
@@ -363,7 +359,7 @@ def stream_tts(sentence):
 async def startup_event():
     server_url = f"{HOST}:{PORT}"
     print("")
-    print_markdown(f"\n*Ready.*\n")
+    print_markdown("\n*Ready.*\n")
     print("")

 @app.on_event("shutdown")
2 changes: 0 additions & 2 deletions core/source/server/services/stt/local-whisper/stt.py
@@ -10,8 +10,6 @@
 import ffmpeg
 import subprocess

-import os
-import subprocess


 class Stt:
2 changes: 0 additions & 2 deletions core/source/server/services/tts/openai/tts.py
@@ -2,8 +2,6 @@
 import tempfile
 from openai import OpenAI
 import os
-import subprocess
-import tempfile

 client = OpenAI()
1 change: 0 additions & 1 deletion core/source/server/skills/schedule.py
@@ -3,7 +3,6 @@
 from pytimeparse import parse
 from crontab import CronTab
 from uuid import uuid4
-from datetime import datetime
 from platformdirs import user_data_dir

 def schedule(message="", start=None, interval=None) -> None:
41 changes: 0 additions & 41 deletions core/source/server/tests/test_run.py

This file was deleted.

4 changes: 1 addition & 3 deletions core/source/server/tunnel.py
@@ -1,12 +1,10 @@
-import os
 import subprocess
-import re
 import shutil
 import time
 from ..utils.print_markdown import print_markdown

 def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10001):
-    print_markdown(f"Exposing server to the internet...")
+    print_markdown("Exposing server to the internet...")

     if tunnel_method == "bore":
         try:
2 changes: 0 additions & 2 deletions core/source/server/utils/kernel.py
@@ -3,10 +3,8 @@

 import asyncio
 import subprocess
-import platform
-
 from .logs import setup_logging
 from .logs import logger
 setup_logging()

 def get_kernel_messages():
123 changes: 37 additions & 86 deletions core/source/server/utils/local_mode.py
@@ -1,102 +1,53 @@
 import sys
-import os
-import platform
 import subprocess
 import time
 import inquirer
 from interpreter import interpreter


 def select_local_model():

     # START OF LOCAL MODEL PROVIDER LOGIC
     interpreter.display_message("> 01 is compatible with several local model providers.\n")

-    # Define the choices for local models
-    choices = [
-        "Ollama",
-        "LM Studio",
-        # "Jan",
-    ]
-
-    # Use inquirer to let the user select an option
-    questions = [
-        inquirer.List(
-            "model",
-            message="Which one would you like to use?",
-            choices=choices,
-        ),
-    ]
-    answers = inquirer.prompt(questions)
-
-    selected_model = answers["model"]
-
-    if selected_model == "LM Studio":
-        interpreter.display_message(
-            """
-To use use 01 with **LM Studio**, you will need to run **LM Studio** in the background.
-
-1. Download **LM Studio** from [https://lmstudio.ai/](https://lmstudio.ai/), then start it.
-2. Select a language model then click **Download**.
-3. Click the **<->** button on the left (below the chat button).
-4. Select your model at the top, then click **Start Server**.
-
-Once the server is running, you can begin your conversation below.
-
-"""
-        )
-        time.sleep(1)
-
-        interpreter.llm.api_base = "http://localhost:1234/v1"
-        interpreter.llm.max_tokens = 1000
-        interpreter.llm.context_window = 8000
-        interpreter.llm.api_key = "x"
-
-    elif selected_model == "Ollama":
-        try:
-
-            # List out all downloaded ollama models. Will fail if ollama isn't installed
-            result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True)
-            lines = result.stdout.split('\n')
-            names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]  # Extract names, trim out ":latest", skip header
-
-            # If there are no downloaded models, prompt them to download a model and try again
-            if not names:
-                time.sleep(1)
-
-                interpreter.display_message(f"\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
-
-                print("Please download a model then try again\n")
-                time.sleep(2)
-                sys.exit(1)
-
-            # If there are models, prompt them to select one
-            else:
-                time.sleep(1)
-                interpreter.display_message(f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
-
-                # Create a new inquirer selection from the names
-                name_question = [
-                    inquirer.List('name', message="Select a downloaded Ollama model", choices=names),
-                ]
-                name_answer = inquirer.prompt(name_question)
-                selected_name = name_answer['name'] if name_answer else None
-
-                # Set the model to the selected model
-                interpreter.llm.model = f"ollama/{selected_name}"
-                interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
-                time.sleep(1)
-
-        # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again
-        except (subprocess.CalledProcessError, FileNotFoundError) as e:
-            print("Ollama is not installed or not recognized as a command.")
-            time.sleep(1)
-            interpreter.display_message(f"\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n")
-            time.sleep(2)
-            sys.exit(1)
+    selected_model = "Ollama"
+    try:
+        # List out all downloaded ollama models. Will fail if ollama isn't installed
+        result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True)
+        lines = result.stdout.split('\n')
+        names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]  # Extract names, trim out ":latest", skip header
+
+        # If there are no downloaded models, prompt them to download a model and try again
+        if not names:
+            time.sleep(1)
+
+            interpreter.display_message("\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
+
+            print("Please download a model then try again\n")
+            time.sleep(2)
+            sys.exit(1)
+
+        # If there are models, prompt them to select one
+        else:
+            time.sleep(1)
+            interpreter.display_message(f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
+
+            # Create a new inquirer selection from the names
+            name_question = [
+                inquirer.List('name', message="Select a downloaded Ollama model", choices=names),
+            ]
+            name_answer = inquirer.prompt(name_question)
+            selected_name = name_answer['name'] if name_answer else None
+
+            # Set the model to the selected model
+            interpreter.llm.model = "ollama/mistral"
+            interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
+            time.sleep(1)
+
+    # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        print("Ollama is not installed or not recognized as a command.")
+        time.sleep(1)
+        interpreter.display_message("\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n")
+        time.sleep(2)
+        sys.exit(1)

     # elif selected_model == "Jan":
     #     interpreter.display_message(
2 changes: 1 addition & 1 deletion core/source/server/utils/process_utils.py
@@ -25,4 +25,4 @@ def kill_process_tree():
         except psutil.NoSuchProcess:
             print(f"Process {pid} does not exist or is already terminated")
         except psutil.AccessDenied:
-            print(f"Permission denied to terminate some processes")
+            print("Permission denied to terminate some processes")