-
Notifications
You must be signed in to change notification settings - Fork 223
/
Copy path: docker-compose.yml
72 lines (67 loc) · 1.63 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
services:
  qdrant:
    # Vector store used by the RAG workflow
    image: qdrant/qdrant:latest
    ports:
      - "6333:6333"  # REST API
      - "6334:6334"  # gRPC API
    volumes:
      # Persist collections across container restarts
      - qdrant_data:/qdrant/storage

  redis:
    # LlamaDeploy message queue
    image: redis:latest
    hostname: redis
    ports:
      - "6379:6379"
    healthcheck:
      # `incr` succeeds only once the server accepts commands
      test: redis-cli --raw incr ping
      interval: 5s
      timeout: 3s
      retries: 5

  apiserver:
    # LlamaDeploy API server, will run the workflows
    image: llamaindex/llama-deploy:main
    environment:
      QDRANT_HOST: qdrant
      # Braced form is the canonical Compose interpolation syntax; the value
      # is read from the host environment (or an .env file) at compose time.
      OPENAI_API_KEY: ${OPENAI_API_KEY}
    ports:
      - "4501:4501"
    depends_on:
      redis:
        condition: service_healthy
    healthcheck:
      test: llamactl status
      interval: 5s
      timeout: 3s
      retries: 5
    volumes:
      # Mount the project so the apiserver can load workflow code
      - ./:/opt/app
    working_dir: /opt/app

  deploy_workflows:
    # Init container: deploys python_fullstack.yaml against the apiserver, then exits
    image: llamaindex/llama-deploy:main
    volumes:
      - ./python_fullstack.yaml:/opt/python_fullstack.yaml
    working_dir: /opt/
    depends_on:
      apiserver:
        condition: service_healthy
    entrypoint: llamactl -s http://apiserver:4501 -t 60 deploy python_fullstack.yaml

  frontend:
    # UI for this deployment, running at http://localhost:3000
    environment:
      APISERVER_URL: http://apiserver:4501
      DEPLOYMENT_NAME: MyDeployment
    build:
      context: ./frontend
      # NOTE(review): lowercase filename — confirm it matches the file on disk
      # before renaming to the conventional "Dockerfile".
      dockerfile: dockerfile
    ports:
      - "3000:3000"
      - "9000:9000"
    volumes:
      # Live-mount the frontend source for development
      - ./frontend:/app
    depends_on:
      # Only start the UI once the workflows have been deployed successfully
      deploy_workflows:
        condition: service_completed_successfully

volumes:
  qdrant_data: