diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..485dee64b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+.idea
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..18c914718
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000..1ec41bdb6
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 TransformerOptimus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.MD b/README.MD
new file mode 100644
index 000000000..9cd5b61bb
--- /dev/null
+++ b/README.MD
@@ -0,0 +1,51 @@
+# SuperAGI
+
+[Discord](https://discord.gg/dXbRe5BHJC) · [Twitter](https://twitter.com/_superAGI) · [Reddit](https://www.reddit.com/r/Super_AGI)
+
+### *Infrastructure for building useful Autonomous Agents*
+
+## Features
+
+### **Provision, Spawn & Deploy Autonomous AI Agents**
+
+### **Extend Agent Capabilities with Tools**
+
+### **Run Concurrent Agents Seamlessly**
+
+### **Open Source:**
+SuperAGI is an open-source platform; developers can join a community of contributors who are constantly working to make it better.
+
+### **GUI:**
+Access your agents through a user-friendly graphical interface, simplifying agent management and interaction.
+
+### **Action Console:**
+Interact with agents by providing input, permissions, and more.
+
+### **Multiple Vector DBs:**
+Connect to multiple vector databases to enhance your agent's performance and access additional data sources.
+
+### **Multi-Model Agents:**
+Customize your agents by using different models of your choice, tailoring their behavior to specific tasks.
+
+### **Agent Trajectory Fine-Tuning:**
+Agents learn and improve their performance over time through feedback loops, allowing for fine-tuning and optimization.
+
+### **Performance Telemetry:**
+Gain insights into your agent's performance through telemetry data, enabling optimization and improvement.
+
+### **Optimized Token Usage:**
+Control token usage to effectively manage costs associated with the platform.
+
+### **Agent Memory Storage:**
+Enable agents to learn and adapt by storing their memory, facilitating continuous improvement.
+
+### **Looping Detection Heuristics:**
+Receive notifications when agents get stuck in a loop and take proactive measures to resolve the issue.
+
+### **Concurrent Agents:**
+Run multiple agents simultaneously, maximizing efficiency through parallel processing.
+
+### **Resource Manager:**
+Read and store files generated by agents, facilitating data management and analysis.
diff --git a/agent/__init__.py b/agent/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/agent/agent_execution.py b/agent/agent_execution.py
new file mode 100644
index 000000000..aeedde567
--- /dev/null
+++ b/agent/agent_execution.py
@@ -0,0 +1,29 @@
+import os
+
+import openai
+
+
+# agent has a master prompt
+# agent executes the master prompt along with long term memory
+# agent can run the task queue as well with long term memory
+class AgentExecution:
+    def __init__(self, agent_prompt, document):
+        self.agent_prompt = agent_prompt
+        self.document = document
+        self.state = None
+        # Model settings for the OpenAI request; same defaults as the OpenAi wrapper in llms/openai.py
+        self.model = "gpt-4"
+        self.temperature = 0.3
+        self.max_tokens = 3600
+        self.top_p = 1
+        self.frequency_penalty = 0
+        self.presence_penalty = 0
+        self.number_of_results = 1
+
+
+ async def send_request_to_openai(self, prompt):
+ try:
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+ response = await openai.ChatCompletion.acreate(
+ n=self.number_of_results,
+ model=self.model,
+ messages=prompt,
+ temperature=self.temperature,
+ max_tokens=self.max_tokens,
+ top_p=self.top_p,
+ frequency_penalty=self.frequency_penalty,
+ presence_penalty=self.presence_penalty
+ )
+ return response
+
+ except Exception as exception:
+ return {"error": exception}
+
+
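+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only): requires OPENAI_API_KEY to be set.
+    # The agent_prompt and document arguments are free-form placeholders here.
+    import asyncio
+
+    async def _demo():
+        execution = AgentExecution(agent_prompt="You are a helpful agent.", document=None)
+        response = await execution.send_request_to_openai(
+            [{"role": "user", "content": "List three uses for an autonomous agent."}]
+        )
+        print(response)
+
+    asyncio.run(_demo())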
diff --git a/agent/agent_prompt.py b/agent/agent_prompt.py
new file mode 100644
index 000000000..3d0aa3b23
--- /dev/null
+++ b/agent/agent_prompt.py
@@ -0,0 +1,45 @@
+from typing import List
+
+
+class AgentPrompt:
+    def __init__(self) -> None:
+ self.ai_name: str = ""
+ self.ai_role: str = ""
+ self.base_prompt: str = ""
+ self.goals: List[str] = []
+ self.constraints: List[str] = []
+ self.tools: List[str] = []
+ self.resources: List[str] = []
+ self.evaluations: List[str] = []
+ self.response_format: str = ""
+
+    def set_base_system_prompt(self, base_prompt: str) -> None:
+        self.base_prompt = base_prompt
+
+    def set_response_format(self, response_format: str) -> None:
+        self.response_format = response_format
+
+    def construct_full_prompt(self) -> str:
+        # Construct the full prompt from the agent identity, base prompt and goals
+        full_prompt = (
+            f"You are {self.ai_name}, {self.ai_role}\n{self.base_prompt}\n\nGOALS:\n\n"
+        )
+        for i, goal in enumerate(self.goals):
+            full_prompt += f"{i + 1}. {goal}\n"
+
+        # Render the remaining sections (filled in by the prompt builder) from this prompt's own lists
+        for title, items in [("CONSTRAINTS", self.constraints), ("TOOLS", self.tools),
+                             ("RESOURCES", self.resources), ("PERFORMANCE EVALUATION", self.evaluations)]:
+            if items:
+                full_prompt += f"\n{title}:\n\n"
+                for i, item in enumerate(items):
+                    full_prompt += f"{i + 1}. {item}\n"
+
+        if self.response_format:
+            full_prompt += f"\n{self.response_format}"
+        return full_prompt
+
+
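+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only): the name, role and goals below are
+    # made-up examples used to show the rendered prompt structure.
+    prompt = AgentPrompt()
+    prompt.ai_name = "ResearchGPT"
+    prompt.ai_role = "an AI assistant that researches topics and summarises findings."
+    prompt.set_base_system_prompt("Your decisions must always be made independently.")
+    prompt.goals = ["Find three recent articles about autonomous agents.",
+                    "Summarise each article in two sentences."]
+    print(prompt.construct_full_prompt())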
diff --git a/agent/agent_prompt_builder.py b/agent/agent_prompt_builder.py
new file mode 100644
index 000000000..14d315c8b
--- /dev/null
+++ b/agent/agent_prompt_builder.py
@@ -0,0 +1,99 @@
+from agent.agent_prompt import AgentPrompt
+
+
+class AgentPromptBuilder:
+    def __init__(self, agent=None):
+        self.agent = agent
+        self.agent_prompt = AgentPrompt()
+
+ def set_base_prompt(self, base_prompt):
+ self.agent_prompt.set_base_system_prompt(base_prompt)
+
+    def add_goal(self, goal):
+        self.agent_prompt.goals.append(goal)
+
+    def add_tool(self, tool):
+        self.agent_prompt.tools.append(tool)
+
+ def add_resource(self, resource: str) -> None:
+ self.agent_prompt.resources.append(resource)
+
+ def add_constraint(self, constraint):
+ self.agent_prompt.constraints.append(constraint)
+
+ def add_evaluation(self, evaluation: str) -> None:
+ self.agent_prompt.evaluations.append(evaluation)
+
+ def set_response_format(self, response_format: str) -> None:
+ self.agent_prompt.set_response_format(response_format)
+
+
+    def generate_prompt_string(self) -> str:
+        # Render the assembled AgentPrompt as a single prompt string
+        return self.agent_prompt.construct_full_prompt()
+
+    def build_agent_prompt(self) -> str:
+        # Apply the default system prompt to this builder's AgentPrompt and render it
+        prompt_start = (
+            "Your decisions must always be made independently "
+            "without seeking user assistance.\n"
+            "Play to your strengths as an LLM and pursue simple "
+            "strategies with no legal complications.\n"
+            "If you have completed all your tasks, make sure to "
+            'use the "finish" command.'
+        )
+        self.agent_prompt.set_base_system_prompt(prompt_start)
+        return self.generate_prompt_string()
+
+    @classmethod
+    def get_autogpt_prompt(cls, tools=None) -> str:
+        # Initialize the prompt builder; the tools to expose to the agent are supplied by the caller
+        prompt_builder = cls()
+ base_prompt = (
+ "Your decisions must always be made independently "
+ "without seeking user assistance.\n"
+ "Play to your strengths as an LLM and pursue simple "
+ "strategies with no legal complications.\n"
+ "If you have completed all your tasks, make sure to "
+ 'use the "finish" command.'
+ )
+ prompt_builder.set_base_prompt(base_prompt)
+
+        # Add constraints to the prompt builder
+ prompt_builder.add_constraint(
+ "~4000 word limit for short term memory. "
+ "Your short term memory is short, "
+ "so immediately save important information to files."
+ )
+ prompt_builder.add_constraint(
+ "If you are unsure how you previously did something "
+ "or want to recall past events, "
+ "thinking about similar events will help you remember."
+ )
+ prompt_builder.add_constraint("No user assistance")
+ prompt_builder.add_constraint(
+ 'Exclusively use the commands listed in double quotes e.g. "command name"'
+ )
+
+        # Add the caller-supplied tools (commands) to the prompt builder
+        for tool in tools or []:
+            prompt_builder.add_tool(tool)
+
+ resources = ["Internet access for searches and information gathering.",
+ "Long Term memory management.",
+ "GPT-3.5 powered Agents for delegation of simple tasks.",
+ "File output."]
+ for resource in resources:
+ prompt_builder.add_resource(resource)
+
+        # Add performance evaluations to the prompt builder
+ evaluations = [
+ "Continuously review and analyze your actions "
+ "to ensure you are performing to the best of your abilities.",
+ "Constructively self-criticize your big-picture behavior constantly.",
+ "Reflect on past decisions and strategies to refine your approach.",
+ "Every command has a cost, so be smart and efficient. "
+ "Aim to complete tasks in the least number of steps.",
+ ]
+ for evaluation in evaluations:
+ prompt_builder.add_evaluation(evaluation)
+
+        # Generate the final prompt string
+        prompt_string = prompt_builder.generate_prompt_string()
+
+        return prompt_string
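+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only): the tool descriptions below are
+    # made-up examples of the strings a caller might register.
+    example_tools = ['Google Search: "google", args: "query": "<query>"',
+                     'Write to file: "write_to_file", args: "file": "<file>", "text": "<text>"']
+    print(AgentPromptBuilder.get_autogpt_prompt(tools=example_tools))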
diff --git a/agent/super_agi.py b/agent/super_agi.py
new file mode 100644
index 000000000..b480f419a
--- /dev/null
+++ b/agent/super_agi.py
@@ -0,0 +1,16 @@
+# agent has a master prompt
+# agent executes the master prompt along with long term memory
+# agent can run the task queue as well with long term memory
+class SuperAgi:
+    def __init__(self):
+ self.state = None
+
+ def execute_step(self):
+ pass
+
+ def call_llm(self):
+ pass
+
+ def move_to_next_step(self):
+ pass
+
diff --git a/llms/__init__.py b/llms/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/llms/openai.py b/llms/openai.py
new file mode 100644
index 000000000..b8be4e528
--- /dev/null
+++ b/llms/openai.py
@@ -0,0 +1,34 @@
+import os
+
+import openai
+
+class OpenAi:
+ def __init__(self, model="gpt-4", temperature=0.3, max_tokens=3600, top_p=1, frequency_penalty=0,
+ presence_penalty=0, number_of_results=1):
+ self.model = model
+ self.temperature = temperature
+ self.max_tokens = max_tokens
+ self.top_p = top_p
+ self.frequency_penalty = frequency_penalty
+ self.presence_penalty = presence_penalty
+ self.number_of_results = number_of_results
+
+
+ async def chat_completion(self, prompt):
+ try:
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+ response = await openai.ChatCompletion.acreate(
+ n=self.number_of_results,
+ model=self.model,
+ messages=prompt,
+ temperature=self.temperature,
+ max_tokens=self.max_tokens,
+ top_p=self.top_p,
+ frequency_penalty=self.frequency_penalty,
+ presence_penalty=self.presence_penalty
+ )
+
+ return response
+
+ except Exception as exception:
+ return {"error": exception}
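+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only): chat_completion expects chat-format
+    # messages and must be awaited; OPENAI_API_KEY is assumed to be set.
+    import asyncio
+
+    async def _demo():
+        llm = OpenAi(model="gpt-4", temperature=0.3)
+        response = await llm.chat_completion(
+            [{"role": "user", "content": "Say hello in one short sentence."}]
+        )
+        print(response)
+
+    asyncio.run(_demo())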
diff --git a/main.py b/main.py
new file mode 100644
index 000000000..6d7c6d969
--- /dev/null
+++ b/main.py
@@ -0,0 +1,13 @@
+from fastapi import FastAPI
+
+app = FastAPI()
+
+
+@app.get("/")
+async def root():
+ return {"message": "Hello World"}
+
+
+@app.get("/hello/{name}")
+async def say_hello(name: str):
+ return {"message": f"Hello {name}"}
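+
+
+# Local development note (not pinned by this change): the app can be served with any
+# ASGI server, e.g. `uvicorn main:app --reload`; the requests in test_main.http target
+# the default http://127.0.0.1:8000 address.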
diff --git a/memory/__init__.py b/memory/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/memory/embedding/__init__.py b/memory/embedding/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/memory/embedding/openai.py b/memory/embedding/openai.py
new file mode 100644
index 000000000..3470016a8
--- /dev/null
+++ b/memory/embedding/openai.py
@@ -0,0 +1,19 @@
+import os
+
+import openai
+
+
+class OpenAiEmbedding:
+ def __init__(self, model="text-embedding-ada-002"):
+ self.model = model
+
+ async def get_embedding(self, text):
+ try:
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+            response = await openai.Embedding.acreate(
+ input=[text],
+ engine=self.model
+ )
+ return response['data'][0]['embedding']
+ except Exception as exception:
+ return {"error": exception}
\ No newline at end of file
diff --git a/memory/pinecone.py b/memory/pinecone.py
new file mode 100644
index 000000000..6b76c2026
--- /dev/null
+++ b/memory/pinecone.py
@@ -0,0 +1,21 @@
+# Pinecone wrapper for long-term memory retrieval
+from memory.embedding.openai import OpenAiEmbedding
+import pinecone
+
+
+class Pinecone:
+ def __init__(self, search_index_name: str = "long_term_memory"):
+ self.embed_model = OpenAiEmbedding(model="text-embedding-ada-002")
+        self.search_index_name = search_index_name
+
+ async def get_match(self, query):
+ namespace = "long_term_memory"
+
+ embed_text = await self.embed_model.get_embedding(query)
+
+ index = pinecone.Index(self.search_index_name)
+ # get relevant contexts (including the questions)
+ res = index.query(embed_text, top_k=5, namespace=namespace, include_metadata=True)
+
+ contexts = [item['metadata']['text'] for item in res['matches']]
+ return contexts
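+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only). Assumes the pinecone client has already
+    # been initialised elsewhere (e.g. pinecone.init(api_key=..., environment=...)),
+    # that a "long_term_memory" index exists, and that OPENAI_API_KEY is set.
+    import asyncio
+
+    async def _demo():
+        memory = Pinecone(search_index_name="long_term_memory")
+        print(await memory.get_match("What tasks are still pending?"))
+
+    asyncio.run(_demo())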
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/task_queue/__init__.py b/task_queue/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/test_main.http b/test_main.http
new file mode 100644
index 000000000..a2d81a92c
--- /dev/null
+++ b/test_main.http
@@ -0,0 +1,11 @@
+# Test your FastAPI endpoints
+
+GET http://127.0.0.1:8000/
+Accept: application/json
+
+###
+
+GET http://127.0.0.1:8000/hello/User
+Accept: application/json
+
+###
diff --git a/tools/__init__.py b/tools/__init__.py
new file mode 100644
index 000000000..e69de29bb