# local-llm-gpu
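#
# A minimal usage sketch (assuming this compose file sits in the SuperAGI repo
# root and is invoked by filename; adjust the path if your checkout differs):
#   docker compose -f local-llm-gpu up --build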
version: '3.8'
services:
backend:
volumes:
- "./:/app"
build: .
ports:
- "8001:8001"
depends_on:
- super__tgwui
- super__redis
- super__postgres
networks:
- super_network
celery:
volumes:
- "./:/app"
build:
context: .
dockerfile: DockerfileCelery
depends_on:
- super__tgwui
- super__redis
- super__postgres
networks:
- super_network
gui:
build: ./gui
ports:
- "3000:3000"
environment:
- NEXT_PUBLIC_API_BASE_URL=http://localhost:8001
networks:
- super_network
volumes:
- ./gui:/app
- /app/node_modules
- /app/.next
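      # The two anonymous volumes above (/app/node_modules and /app/.next) keep
      # the container's installed dependencies and build output from being
      # shadowed by the ./gui bind mount.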
super__tgwui:
build:
context: ./tgwui/
target: llama-cublas
dockerfile: DockerfileTGWUI
# args:
# - LCL_SRC_DIR=text-generation-webui # Developers - see Dockerfile app_base
image: atinoda/text-generation-webui:llama-cublas # Specify variant as the :tag
container_name: super__tgwui
environment:
- EXTRA_LAUNCH_ARGS="--no-mmap --verbose --extensions openai --auto-devices --n_ctx 2000 --gpu-memory 22 22 --n-gpu-layers 128 --threads 8"
      # - BUILD_EXTENSIONS_LIVE="silero_tts whisper_stt" # Install named extensions during every container launch. THIS WILL SIGNIFICANTLY SLOW LAUNCH TIME.
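      # The launch args above assume a multi-GPU llama.cpp (cuBLAS) setup:
      # --gpu-memory 22 22 caps per-GPU memory in GiB, --n-gpu-layers 128
      # offloads layers to the GPUs, --n_ctx 2000 sets the context size, and
      # --extensions openai exposes the OpenAI-compatible API (port 5001 below).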
ports:
- 7860:7860 # Default web port
- 5000:5000 # Default API port
- 5005:5005 # Default streaming port
- 5001:5001 # Default OpenAI API extension port
volumes:
- ./tgwui/config/loras:/app/loras
- ./tgwui/config/models:/app/models
- ./tgwui/config/presets:/app/presets
- ./tgwui/config/prompts:/app/prompts
- ./tgwui/config/softprompts:/app/softprompts
- ./tgwui/config/training:/app/training
# - ./config/extensions:/app/extensions
logging:
driver: json-file
options:
        max-file: "3" # number of rotated log files to keep
max-size: '10m'
networks:
- super_network
deploy:
resources:
reservations:
devices:
- driver: nvidia
# count: "all"
              device_ids: ['0', '1'] # mutually exclusive with `count` above; comment out the line you are not using.
capabilities: [gpu]
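              # GPU passthrough here requires the NVIDIA Container Toolkit
              # to be installed on the Docker host.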
super__redis:
image: "docker.io/library/redis:latest"
networks:
- super_network
super__postgres:
image: "docker.io/library/postgres:latest"
environment:
- POSTGRES_USER=superagi
- POSTGRES_PASSWORD=password
- POSTGRES_DB=super_agi_main
volumes:
- superagi_postgres_data:/var/lib/postgresql/data/
networks:
- super_network
ports:
- "5432:5432"
networks:
super_network:
driver: bridge
volumes:
superagi_postgres_data: