-
Notifications
You must be signed in to change notification settings - Fork 26
Expand file tree
/
Copy pathcompose.yaml
More file actions
279 lines (265 loc) · 8.74 KB
/
compose.yaml
File metadata and controls
279 lines (265 loc) · 8.74 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
# Single shared bridge network; services not using network_mode join this
# and reach each other by service name (e.g. `db`, `nodeodm`).
networks:
  dtm-network:
    name: dtm-network
# Named volumes for state that must survive container recreation.
volumes:
  # PostgreSQL data directory (mounted by the `db` service).
  db-data:
    name: drone-tm-db-data
  # Built frontend assets, shared between the `frontend` and `backend` services.
  frontend-html:
    name: drone-tm-frontend-html
  # RustFS object-store data (mounted by the `s3` service).
  rustfs-data:
    name: drone-tm-rustfs-data
services:
  # FastAPI application server. Several sibling services attach to this
  # container's network namespace via `network_mode: service:backend`, so
  # the S3 (9000/9090) ports are published here rather than on `s3` itself.
  backend:
    image: ghcr.io/hotosm/drone-tm/backend:debug
    build:
      context: src
      dockerfile: backend/Dockerfile
      args:
        - MONITORING=${MONITORING}
    # Override to include --reload hot-reloading
    command:
      [
        "uvicorn",
        "app.main:api",
        "--host",
        "0.0.0.0",
        "--port",
        "8000",
        "--log-level",
        "info",
        "--reload",
      ]
    depends_on:
      db:
        condition: service_healthy
      nodeodm:
        condition: service_started
      migrations:
        condition: service_completed_successfully
    ports:
      # Port mappings are quoted: unquoted `host:container` scalars risk
      # YAML 1.1 sexagesimal-integer parsing (and this matches the `db`
      # service, which already quotes its mapping).
      - "${BACKEND_WEB_APP_PORT:-8000}:8000"
      # For RustFS S3 (API :9000, console :9090)
      - "9000:9000"
      - "9090:9090"
    volumes:
      # Source is bind-mounted read-only so uvicorn --reload picks up edits.
      - ./src/backend/pyproject.toml:/project/src/backend/pyproject.toml:ro
      - ./src/backend/app:/project/src/backend/app:ro
      - ./src/backend/tests:/project/src/backend/tests:ro
      - ./src/backend/packages/drone-flightplan/drone_flightplan:/opt/python/lib/python3.11/site-packages/drone_flightplan:ro
      - frontend-html:/project/src/backend/frontend_html
    env_file: .env
    environment:
      # In dev the browser can't resolve docker DNS names like `s3`,
      # so presigned URLs must use a host-accessible base.
      S3_ENDPOINT_UPLOAD: ${S3_ENDPOINT_UPLOAD:-http://localhost:9000}
      # In dev, use the host-exposed RustFS port for presigned URLs.
      # (Path-rewriting proxies like `/s3` break S3 presigned signatures.)
      S3_ENDPOINT_DOWNLOAD: ${S3_ENDPOINT_DOWNLOAD:-http://localhost:9000}
      # Host-networked ScaleODM needs host-reachable URLs for S3 and webhooks.
      SCALEODM_S3_ENDPOINT: ${SCALEODM_S3_ENDPOINT:-http://localhost:9000}
      BACKEND_URL_INTERNAL: ${BACKEND_URL_INTERNAL:-http://localhost:${BACKEND_WEB_APP_PORT:-8000}}
      # Dev uses network_mode:service:backend (shared namespace), so localhost
      DRAGONFLY_DSN: redis://localhost:6379/0
    networks:
      - dtm-network
    extra_hosts:
      - "host.docker.internal:host-gateway"
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/__lbheartbeat__"]
      start_period: 60s
      interval: 10s
      timeout: 5s
      retries: 10
frontend:
image: ghcr.io/hotosm/drone-tm/frontend:debug
build:
context: src
dockerfile: frontend/Dockerfile
target: development
ports:
- ${FRONTEND_WEB_APP_PORT:-3040}:3040
depends_on:
backend:
condition: service_started
s3-init:
condition: service_completed_successfully
environment:
VITE_API_URL: ${VITE_API_URL:-http://localhost:${BACKEND_WEB_APP_PORT:-8000}/api}
networks:
- dtm-network
volumes:
- ./src/frontend/public:/app/frontend/public
- ./src/frontend/src:/app/frontend/src
- ./src/frontend/components.json:/app/frontend/components.json
- ./src/frontend/index.html:/app/frontend/index.html
- ./src/frontend/postcss.config.js:/app/frontend/postcss.config.js
- ./src/frontend/tailwind.config.js:/app/frontend/tailwind.config.js
- ./src/frontend/vite.config.ts:/app/frontend/vite.config.ts
- ./src/gcp-editor/src:/app/gcp-editor/src
- ./src/gcp-editor/dist:/app/gcp-editor/dist
- frontend-html:/frontend_html
  # If error, please upgrade the db with `contrib/pg-upgrade/compose.yaml`
  # PostGIS-enabled PostgreSQL; data persists in the `db-data` named volume.
  db:
    image: postgis/postgis:16-3.4-alpine
    volumes:
      - db-data:/var/lib/postgresql/data
    env_file: .env
    environment:
      LANG: en-GB.utf8
      POSTGRES_INITDB_ARGS: "--locale-provider=icu --icu-locale=en-GB"
    ports:
      # Host port 5467 maps to Postgres' standard 5432 inside the container.
      - "5467:5432"
    networks:
      - dtm-network
    restart: unless-stopped
    healthcheck:
      # Interpolated by Compose before the container runs the check.
      test: pg_isready -U ${POSTGRES_USER:-dtm} -d ${POSTGRES_DB:-dtm_db}
      start_period: 5s
      interval: 10s
      timeout: 5s
      retries: 3
  # RustFS: S3-compatible object storage (API on :9000, console on :9090).
  s3:
    image: "rustfs/rustfs:1.0.0-alpha.90"
    command: server /data --console-address ":9090" --address ":9000"
    volumes:
      - rustfs-data:/data
    environment:
      # NOTE we set these to match the dev credentials for ScaleODM S3 service too
      # This allows us to test with ScaleODM, without having to re-configure access/secret
      RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY:-admin}
      RUSTFS_SECRET_KEY: ${S3_SECRET_KEY:-somelongpassword}
    # Shares the backend container's network namespace; ports 9000/9090 are
    # published on the `backend` service instead of here.
    network_mode: service:backend
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://localhost:9000/health"]
      interval: 5s
      retries: 3
      start_period: 5s
      timeout: 5s
  # One-shot bootstrap: creates the S3 bucket (idempotently) and applies an
  # anonymous-read policy for publicuploads/ and tutorials/ prefixes, then exits.
  s3-init:
    image: "docker.io/python:3.13-slim-bookworm"
    # NOTE: the `|` block below is the runtime script — keep it comment-free
    # here; `#` lines inside it would become part of the script text.
    entrypoint:
      - /bin/sh
      - -eu
      - -c
      - |
        python -m pip install --no-cache-dir boto3 >/dev/null
        python - <<'PY'
        import json
        import os
        import boto3
        import botocore.config
        import botocore.exceptions
        s3 = boto3.client(
            "s3",
            endpoint_url=os.environ["AWS_ENDPOINT_URL"],
            region_name="us-east-1",
            aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
            aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
            config=botocore.config.Config(s3={"addressing_style": "path"}),
        )
        bucket = os.environ["S3_BUCKET_NAME"]
        try:
            s3.create_bucket(Bucket=bucket)
            print(f"Created bucket '{bucket}'")
        except botocore.exceptions.ClientError as e:
            code = e.response.get("Error", {}).get("Code", "")
            if code in ("BucketAlreadyOwnedByYou", "BucketAlreadyExists"):
                print(f"Bucket '{bucket}' already exists")
            else:
                raise
        policy = json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"AWS": ["*"]},
                "Action": ["s3:GetObject"],
                "Resource": [
                    f"arn:aws:s3:::{bucket}/publicuploads/*",
                    f"arn:aws:s3:::{bucket}/tutorials/*",
                ],
            }],
        })
        s3.put_bucket_policy(Bucket=bucket, Policy=policy)
        print("Anonymous download policy set for publicuploads/ and tutorials/.")
        PY
    environment:
      # localhost works because this container shares the backend's network
      # namespace (see network_mode below), where RustFS listens on :9000.
      AWS_ENDPOINT_URL: http://localhost:9000
      AWS_ACCESS_KEY_ID: ${S3_ACCESS_KEY:-admin}
      AWS_SECRET_ACCESS_KEY: ${S3_SECRET_KEY:-somelongpassword}
      S3_BUCKET_NAME: ${S3_BUCKET_NAME:-dtm-bucket}
    depends_on:
      s3:
        condition: service_healthy
    network_mode: service:backend
    # One-shot job: run to completion, never restart.
    restart: "no"
  # One-shot job: applies Alembic database migrations, then exits.
  # `backend` waits on this via `condition: service_completed_successfully`.
  migrations:
    image: ghcr.io/hotosm/drone-tm/backend:debug
    volumes:
      - ./src/backend:/project/src/backend
    depends_on:
      db:
        condition: service_healthy
    env_file:
      - .env
    networks:
      - dtm-network
    command: ["alembic", "upgrade", "head"]
    restart: "no"
  # Background task worker (arq); --watch restarts it on code changes in dev.
  arq-worker:
    image: ghcr.io/hotosm/drone-tm/backend:debug
    command: arq --watch /project/src/backend/app app.arq.tasks.WorkerSettings
    depends_on:
      backend:
        condition: service_healthy
      dragonfly:
        condition: service_started
      nodeodm:
        condition: service_started
      qgis-packager:
        condition: service_healthy
    volumes:
      - ./src/backend:/project/src/backend
      - ./src/qfield-plugin:/project/src/qfield-plugin
    env_file: .env
    environment:
      QGIS_URL: ${QGIS_URL:-http://localhost:8080}
      SCALEODM_S3_ENDPOINT: ${SCALEODM_S3_ENDPOINT:-http://localhost:9000}
      BACKEND_URL_INTERNAL: ${BACKEND_URL_INTERNAL:-http://localhost:${BACKEND_WEB_APP_PORT:-8000}}
      # localhost works because this container shares the backend's network
      # namespace, where dragonfly listens on :6379.
      DRAGONFLY_DSN: redis://localhost:6379/0
    network_mode: service:backend
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "arq", "app.arq.tasks.WorkerSettings", "--check"]
      interval: 30s
      timeout: 5s
      retries: 2
      start_period: 20s
  # Redis-compatible store used via DRAGONFLY_DSN (redis://localhost:6379/0);
  # shares the backend's network namespace, hence reachable on localhost.
  dragonfly:
    image: ghcr.io/dragonflydb/dragonfly:v1.36.0
    network_mode: service:backend
    restart: unless-stopped
  # QField project packager; consumed by arq-worker via QGIS_URL
  # (default http://localhost:8080 — shared namespace with backend).
  qgis-packager:
    image: "ghcr.io/hotosm/qfield-project-packager:26.3"
    environment:
      LOG_LEVEL: DEBUG
    network_mode: service:backend
    restart: unless-stopped
    healthcheck:
      # Pure-bash TCP probe against :8080 — avoids needing curl in the image.
      test: timeout 5s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
      start_period: 10s
      interval: 5s
      retries: 5
      timeout: 5s
# This container does the actual imagery processing (not persistent, scalable)
nodeodm:
image: docker.io/opendronemap/nodeodm:3.5.5
command: ["--port", "9900", "--log_level", "debug"]
env_file: .env
ports:
- 9900:9900
networks:
- dtm-network
restart: unless-stopped
oom_score_adj: 500