
Commit 83733af

add compose_openeuler.yaml
Signed-off-by: zhihang <[email protected]>
1 parent e647adf commit 83733af

1 file changed: compose_openeuler.yaml (+183 −0)

```yaml
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

services:
  redis-vector-db:
    image: redis/redis-stack:7.2.0-v9
    container_name: redis-vector-db
    ports:
      - "6379:6379"
      - "8001:8001"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 10
  dataprep-redis-service:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler
    container_name: dataprep-redis-server
    depends_on:
      redis-vector-db:
        condition: service_healthy
      tei-embedding-service:
        condition: service_started
    ports:
      - "6007:5000"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/v1/health_check || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 50
    restart: unless-stopped
  tei-embedding-service:
    image: openeuler/text-embeddings-inference-cpu:1.5.0-oe2403lts
    container_name: tei-embedding-server
    ports:
      - "6006:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
  retriever:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler
    container_name: retriever-redis-server
    depends_on:
      - redis-vector-db
    ports:
      - "7000:7000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      LOGFLAG: ${LOGFLAG}
      RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
    restart: unless-stopped
  tei-reranking-service:
    image: openeuler/text-embeddings-inference-cpu:1.5.0-oe2403lts
    container_name: tei-reranking-server
    ports:
      - "8808:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
    command: --model-id ${RERANK_MODEL_ID} --auto-truncate
  vllm-service:
    image: openeuler/vllm-cpu:0.8.5-oe2403lts
    container_name: vllm-service
    ports:
      - "9009:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 128g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HF_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
      VLLM_CPU_KVCACHE_SPACE: 50
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:9009/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
  chatqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-backend-server
    depends_on:
      redis-vector-db:
        condition: service_started
      dataprep-redis-service:
        condition: service_healthy
      tei-embedding-service:
        condition: service_started
      retriever:
        condition: service_started
      tei-reranking-service:
        condition: service_started
      vllm-service:
        condition: service_healthy
    ports:
      - "8888:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
      - RETRIEVER_SERVICE_HOST_IP=retriever
      - RERANK_SERVER_HOST_IP=tei-reranking-service
      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
      - LLM_SERVER_HOST_IP=vllm-service
      - LLM_SERVER_PORT=80
      - LLM_MODEL=${LLM_MODEL_ID}
      - LOGFLAG=${LOGFLAG}
    ipc: host
    restart: always
  chatqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-ui-server
    depends_on:
      - chatqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
    ipc: host
    restart: always
  chatqna-xeon-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-nginx-server
    depends_on:
      - chatqna-xeon-backend-server
      - chatqna-xeon-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=chatqna
      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
      - BACKEND_SERVICE_PORT=8888
      - DATAPREP_SERVICE_IP=dataprep-redis-service
      - DATAPREP_SERVICE_PORT=5000
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
```
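The commit itself does not pin down the runtime variables the file expects, so here is a minimal launch sketch. The variable names come straight from the compose file above; every value (model IDs, token, cache path) is a placeholder, not part of the commit. Note that `host_ip` must be exported on the host, because Compose interpolates it into the `vllm-service` healthcheck at parse time:

```bash
# Hypothetical environment setup -- variable names are taken from
# compose_openeuler.yaml above; every value below is a placeholder.
export host_ip=$(hostname -I | awk '{print $1}')   # interpolated into the vllm-service healthcheck
export HF_TOKEN="<your HuggingFace token>"
export INDEX_NAME="rag-redis"
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
export MODEL_CACHE="./data"   # host directory mounted into the TEI and vLLM containers

docker compose -f compose_openeuler.yaml up -d
```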

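Once the healthchecks go green (the vLLM check's 100 retries leave room for a slow first model download), a quick smoke test against the backend's published port is possible. The `/v1/chatqna` route and payload shape below follow the usual OPEA ChatQnA examples; they are an assumption here, not something this commit defines:

```bash
# Hypothetical smoke test; endpoint and payload assumed from OPEA ChatQnA examples.
curl http://localhost:8888/v1/chatqna \
  -H "Content-Type: application/json" \
  -d '{"messages": "What is openEuler?"}'
```

The nginx front door on `${NGINX_PORT:-80}` proxies the UI (port 5173) and the same backend route, so end users never hit port 8888 directly.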