# docker-compose.yml
services:
  # Neo4j enterprise container
  graph:
    image: neo4j:enterprise
    ports:
      - "7474:7474"
      - "7687:7687"
    container_name: graph
    environment:
      # apoc-extended is not supported by Neo4j 5.23 but can be useful for other versions
      NEO4J_PLUGINS: '["graph-data-science","apoc","apoc-extended"]'
      # To enable authentication, set this to <username>/<password> and update the database connection code accordingly.
      # The container has to be started once before the username and password can be changed.
      NEO4J_AUTH: "none"
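      # A minimal sketch of the authenticated variant (placeholder credentials, choose your own):
      # NEO4J_AUTH: "neo4j/changeme"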
      NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
      NEO4J_apoc_export_file_enabled: "true"
      NEO4J_apoc_import_file_enabled: "true"
      NEO4J_dbms_security_procedures_allowlist: gds.*,apoc.*
      NEO4J_dbms_security_procedures_unrestricted: gds.*,apoc.*
      NEO4J_apoc_import_file_use__neo4j__config: "false"
    volumes:
      # Copies the Cypher script into the Neo4j container
      - ./data/movies.cypher:/var/lib/neo4j/files/movies.cypher
    healthcheck:
      # Checks that the HTTP endpoint responds, then loads the movies database from the Cypher script
      test: wget http://localhost:7474 && bin/cypher-shell --fail-fast -f files/movies.cypher || exit 1
    tty: true
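
  # Connection sketch (assuming the port mappings and container_name above are kept):
  # from the host, the browser is at http://localhost:7474 and the Bolt endpoint at bolt://localhost:7687;
  # from other containers on this compose network, use bolt://graph:7687.
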
  # Ollama container
  llm:
    image: ollama/ollama
    # The container name is used to reach the Ollama service from the notebook
    container_name: llm
    ports:
      - "11434:11434"
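    # Access sketch (assuming the defaults above): the notebook can call the Ollama API at
    # http://llm:11434 from inside the compose network, or http://localhost:11434 from the host.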
    # Let Ollama run on the GPU; if you do not have one, comment out or remove the deploy section below.
    # The documentation for installing the correct drivers can be found at https://hub.docker.com/r/ollama/ollama.
    healthcheck:
      test: ollama pull llama3.1:8b-instruct-q2_K
      timeout: 3m
      retries: 3
    extra_hosts:
      - "host.docker.internal:host-gateway"
    # This part lets Ollama use the GPU instead of the CPU.
    # The drivers must be installed and loaded before running it.
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
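              # Optional sketch: the Compose device-reservation spec also accepts a count field,
              # e.g. to reserve a single GPU instead of all available ones:
              # count: 1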
    tty: true
    volumes: # Ollama stores the installed models here
      - ollama:/root/.ollama
    restart: unless-stopped
    profiles: [local]
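    # Because of the profile above, this service only starts when that profile is selected,
    # e.g. with `docker compose --profile local up -d` (sketch of the usual invocation).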

volumes:
  ollama: