# config_template.yaml — SuperAGI configuration template
# NOTE(review): removed GitHub web-page scrape residue (page chrome and the
# line-number gutter 1-125) that preceded this header — it was not part of the
# YAML file and made it unparseable.
#####################------------------SYSTEM KEYS-------------------------########################
PINECONE_API_KEY: YOUR_PINECONE_API_KEY
PINECONE_ENVIRONMENT: YOUR_PINECONE_ENVIRONMENT
OPENAI_API_KEY: YOUR_OPENAI_API_KEY
PALM_API_KEY: YOUR_PALM_API_KEY
REPLICATE_API_TOKEN: YOUR_REPLICATE_API_TOKEN
HUGGING_API_TOKEN: YOUR_HUGGING_FACE_API_TOKEN
# For locally hosted LLMs, comment out the OPENAI_API_BASE line below and uncomment the one after it.
# To configure a local LLM, point your browser to 127.0.0.1:7860 and click the model tab in the text-generation web UI.
OPENAI_API_BASE: https://api.openai.com/v1
#OPENAI_API_BASE: "http://super__tgwui:5001/v1"
# Max-token limits per model: "gpt-3.5-turbo-0301": 4032, "gpt-4-0314": 8092, "gpt-3.5-turbo": 4032, "gpt-4": 8092, "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768, "llama": 2048, "mpt-7b-storywriter": 45000
MODEL_NAME: "gpt-3.5-turbo-0301"
# Other options: "gpt-3.5-turbo", "gpt-4", "models/chat-bison-001"
RESOURCES_SUMMARY_MODEL_NAME: "gpt-3.5-turbo"
MAX_TOOL_TOKEN_LIMIT: 800
MAX_MODEL_TOKEN_LIMIT: 4032  # set to 2048 for llama
# DATABASE INFO (PostgreSQL)
DB_NAME: super_agi_main
DB_HOST: super__postgres
DB_USERNAME: superagi
DB_PASSWORD: password
# NOTE: DB_URL must stay in sync with the DB_* values above.
DB_URL: postgresql://superagi:password@super__postgres:5432/super_agi_main
# Redis details
REDIS_URL: "super__redis:6379"
# STORAGE TYPE ("FILE" or "S3")
STORAGE_TYPE: "FILE"
# TOOLS
TOOLS_DIR: "superagi/tools"
# STORAGE INFO FOR FILES (used when STORAGE_TYPE is "FILE")
RESOURCES_INPUT_ROOT_DIR: workspace/input/{agent_id}
RESOURCES_OUTPUT_ROOT_DIR: workspace/output/{agent_id}/{agent_execution_id} # For keeping resources at agent execution level
#RESOURCES_OUTPUT_ROOT_DIR: workspace/output/{agent_id} # For keeping resources at agent level
# S3 RELATED DETAILS — only used when STORAGE_TYPE is "S3"
BUCKET_NAME:
INSTAGRAM_TOOL_BUCKET_NAME: # Public-read bucket; images generated by stable diffusion are put in this bucket and their public URL is generated from it.
AWS_ACCESS_KEY_ID:
AWS_SECRET_ACCESS_KEY:
# AUTH
ENV: 'DEV' # DEV or PROD; to use GitHub OAuth set to PROD
# WARNING: replace this default secret before deploying to production.
JWT_SECRET_KEY: 'secret'
expiry_time_hours: 1 # token expiry in hours — presumably the JWT session lifetime; confirm against the auth code
# GITHUB OAUTH (used when ENV is PROD):
GITHUB_CLIENT_ID:
GITHUB_CLIENT_SECRET:
FRONTEND_URL: "http://localhost:3000"
# ENCRYPTION KEY — replace this with your own key for production
ENCRYPTION_KEY: abcdefghijklmnopqrstuvwxyz123456
# WEAVIATE
# If you are using docker or a web-hosted instance, uncomment the next two lines and comment out WEAVIATE_USE_EMBEDDED.
# WEAVIATE_URL: YOUR_WEAVIATE_URL
# WEAVIATE_API_KEY: YOUR_WEAVIATE_API_KEY
WEAVIATE_USE_EMBEDDED: true
#####################------------------TOOLS KEY-------------------------########################
# If you have a Google API key and a Custom Search Engine (CSE) ID, use these:
GOOGLE_API_KEY: YOUR_GOOGLE_API_KEY
SEARCH_ENGINE_ID: YOUR_SEARCH_ENGINE_ID
# If you don't have a Google Search key, you can use serper.dev keys instead:
SERP_API_KEY: YOUR_SERPER_API_KEY
# Enter your email credentials to access the email tool:
EMAIL_ADDRESS: YOUR_EMAIL_ADDRESS
EMAIL_PASSWORD: YOUR_EMAIL_APP_PASSWORD # get the app password from https://myaccount.google.com/apppasswords
EMAIL_SMTP_HOST: smtp.gmail.com # change the SMTP host if not using Gmail
EMAIL_SMTP_PORT: 587 # change the SMTP port if not using Gmail
EMAIL_IMAP_SERVER: imap.gmail.com # change the IMAP host if not using Gmail
EMAIL_SIGNATURE: Email sent by SuperAGI
EMAIL_DRAFT_MODE_WITH_FOLDER: YOUR_DRAFTS_FOLDER
EMAIL_ATTACHMENT_BASE_PATH: YOUR_DIRECTORY_FOR_EMAIL_ATTACHMENTS
# GITHUB
GITHUB_USERNAME: YOUR_GITHUB_USERNAME
GITHUB_ACCESS_TOKEN: YOUR_GITHUB_ACCESS_TOKEN
# JIRA
JIRA_INSTANCE_URL: YOUR_JIRA_INSTANCE_URL
JIRA_USERNAME: YOUR_JIRA_EMAIL
JIRA_API_TOKEN: YOUR_JIRA_API_TOKEN
# SLACK
SLACK_BOT_TOKEN: YOUR_SLACK_BOT_TOKEN
# For running stable diffusion:
STABILITY_API_KEY: YOUR_STABILITY_API_KEY
# Engine IDs that can be used: 'stable-diffusion-v1', 'stable-diffusion-v1-5', 'stable-diffusion-512-v2-0', 'stable-diffusion-768-v2-0', 'stable-diffusion-512-v2-1', 'stable-diffusion-768-v2-1', 'stable-diffusion-xl-beta-v2-2-2'
ENGINE_ID: "stable-diffusion-xl-beta-v2-2-2"
## To configure a vector store for the resources manager, uncomment the config
## below for the vector store you want to use.
## RESOURCE_VECTOR_STORE can be REDIS, PINECONE, CHROMA, QDRANT
#RESOURCE_VECTOR_STORE: YOUR_RESOURCE_VECTOR_STORE
#RESOURCE_VECTOR_STORE_INDEX_NAME: YOUR_RESOURCE_VECTOR_STORE_INDEX_NAME
## To use a custom Redis instance:
#REDIS_VECTOR_STORE_URL: YOUR_REDIS_VECTOR_STORE_URL
## To use Qdrant for the vector store in the resources manager:
## (NOTE(review): a second, duplicated commented-out Qdrant section was removed here)
#QDRANT_PORT: YOUR_QDRANT_PORT
#QDRANT_HOST_NAME: YOUR_QDRANT_HOST_NAME
## To use Chroma for the vector store in the resources manager:
#CHROMA_HOST_NAME: YOUR_CHROMA_HOST_NAME
#CHROMA_PORT: YOUR_CHROMA_PORT
## GPU_LAYERS: number of layers to offload to the GPU when using local LLMs
#GPU_LAYERS: YOUR_GPU_LAYERS