-
Notifications
You must be signed in to change notification settings - Fork 174
Expand file tree
/
Copy path docker-compose.yml
More file actions
148 lines (142 loc) · 3.89 KB
/
docker-compose.yml
File metadata and controls
148 lines (142 loc) · 3.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
services:
  # --- LLM Inference (CUDA + grammar support) ---
  # llama.cpp HTTP server with every layer offloaded to the GPU.
  llama-server:
    build:
      context: ./inference
      dockerfile: Dockerfile.v31
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all available NVIDIA GPUs for this container.
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      # Model weights mounted read-only; override host dir via ATLAS_MODELS_DIR.
      - ${ATLAS_MODELS_DIR:-./models}:/models:ro
    command: >
      /llama-server
      --model /models/${ATLAS_MODEL_FILE:-Qwen3.5-9B-Q6_K.gguf}
      --host 0.0.0.0 --port 8080
      --ctx-size ${ATLAS_CTX_SIZE:-32768}
      --n-gpu-layers 99
      --no-mmap
    ports:
      - "${ATLAS_LLAMA_PORT:-8080}:8080"
    ulimits:
      # Unlimited locked memory so model pages can stay pinned in RAM.
      memlock: -1
      stack: 67108864
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 10
      # Model load can be slow; allow 2 minutes before checks count as failures.
      start_period: 120s
    networks:
      - atlas
# --- Geometric Lens (C(x)/G(x) scoring) ---
geometric-lens:
build:
context: ./geometric-lens
environment:
- LLAMA_URL=http://llama-server:8080
- LLAMA_EMBED_URL=http://llama-server:8080
- GEOMETRIC_LENS_ENABLED=true
- PROJECT_DATA_DIR=/data/projects
volumes:
- lens-data:/data/projects
- ${ATLAS_LENS_MODELS:-./geometric-lens/geometric_lens/models}:/app/geometric_lens/models:ro
ports:
- "${ATLAS_LENS_PORT:-8099}:8099"
depends_on:
llama-server:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-sf", "http://localhost:8099/health"]
interval: 15s
timeout: 5s
retries: 5
networks:
- atlas
# --- V3 Pipeline Service ---
v3-service:
build:
context: .
dockerfile: v3-service/Dockerfile
environment:
- ATLAS_INFERENCE_URL=http://llama-server:8080
- ATLAS_LENS_URL=http://geometric-lens:8099
- ATLAS_SANDBOX_URL=http://sandbox:8020
- ATLAS_MODEL_NAME=${ATLAS_MODEL_NAME:-Qwen3.5-9B-Q6_K}
ports:
- "${ATLAS_V3_PORT:-8070}:8070"
depends_on:
llama-server:
condition: service_healthy
geometric-lens:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-sf", "http://localhost:8070/health"]
interval: 15s
timeout: 5s
retries: 5
networks:
- atlas
# --- Sandbox (isolated code execution) ---
sandbox:
build:
context: ./sandbox
tmpfs:
- /tmp/sandbox:size=512M
read_only: true
security_opt:
- no-new-privileges:true
ports:
- "${ATLAS_SANDBOX_PORT:-30820}:8020"
healthcheck:
test: ["CMD", "curl", "-sf", "http://localhost:8020/health"]
interval: 15s
timeout: 5s
retries: 3
networks:
- atlas
# --- ATLAS Proxy v2 (agent loop + grammar + Aider format) ---
# The proxy needs access to your project files for read_file, write_file, etc.
# Set ATLAS_PROJECT_DIR to your project path, or it defaults to the current directory.
atlas-proxy:
build:
context: ./atlas-proxy
environment:
- ATLAS_PROXY_PORT=8090
- ATLAS_INFERENCE_URL=http://llama-server:8080
- ATLAS_LLAMA_URL=http://llama-server:8080
- ATLAS_LENS_URL=http://geometric-lens:8099
- ATLAS_SANDBOX_URL=http://sandbox:8020
- ATLAS_V3_URL=http://v3-service:8070
- ATLAS_AGENT_LOOP=1
- ATLAS_MODEL_NAME=${ATLAS_MODEL_NAME:-Qwen3.5-9B-Q6_K}
volumes:
- ${ATLAS_PROJECT_DIR:-.}:/workspace
working_dir: /workspace
ports:
- "${ATLAS_PROXY_PORT:-8090}:8090"
depends_on:
llama-server:
condition: service_healthy
geometric-lens:
condition: service_healthy
v3-service:
condition: service_healthy
sandbox:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-sf", "http://localhost:8090/health"]
interval: 10s
timeout: 5s
retries: 5
networks:
- atlas
volumes:
  # Persistent storage for geometric-lens per-project data.
  lens-data:

networks:
  atlas:
    driver: bridge