
Commit 204fa7e

add support for openEuler
Signed-off-by: zhihang <[email protected]>
1 parent aa96337 commit 204fa7e

File tree

6 files changed: +446 -0 lines changed


ChatQnA/Dockerfile.openEuler

Lines changed: 11 additions & 0 deletions
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

ARG IMAGE_REPO=opea
ARG BASE_TAG=latest-openeuler
FROM $IMAGE_REPO/comps-base:$BASE_TAG

COPY ./chatqna.py $HOME/chatqna.py
COPY ./entrypoint.sh $HOME/entrypoint.sh

ENTRYPOINT ["bash", "entrypoint.sh"]
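
A minimal build sketch for this image, assuming the file sits at ChatQnA/Dockerfile.openEuler in a GenAIExamples checkout and that the opea/comps-base:latest-openeuler base image is reachable; the output tag is illustrative, not taken from the commit:

# Both build args are shown with their defaults from the Dockerfile above.
cd GenAIExamples/ChatQnA
docker build \
  --build-arg IMAGE_REPO=opea \
  --build-arg BASE_TAG=latest-openeuler \
  -f Dockerfile.openEuler \
  -t opea/chatqna:latest-openeuler .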
ChatQnA Docker Compose deployment (Xeon, openEuler images)

Lines changed: 184 additions & 0 deletions
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

services:
  redis-vector-db:
    image: redis/redis-stack:7.2.0-v9
    container_name: redis-vector-db
    ports:
      - "6379:6379"
      - "8001:8001"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 10
  dataprep-redis-service:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
    container_name: dataprep-redis-server
    depends_on:
      redis-vector-db:
        condition: service_healthy
      tei-embedding-service:
        condition: service_started
    ports:
      - "6007:5000"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/v1/health_check || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 50
    restart: unless-stopped
  tei-embedding-service:
    image: openeuler/text-embeddings-inference-cpu:1.5.0-oe2403lts
    container_name: tei-embedding-server
    ports:
      - "6006:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
  retriever:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
    container_name: retriever-redis-server
    depends_on:
      - redis-vector-db
    ports:
      - "7000:7000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      LOGFLAG: ${LOGFLAG}
      RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
    restart: unless-stopped
  tei-reranking-service:
    image: openeuler/text-embeddings-inference-cpu:1.5.0-oe2403lts
    container_name: tei-reranking-server
    ports:
      - "8808:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
    command: --model-id ${RERANK_MODEL_ID} --auto-truncate
  vllm-service:
    image: openeuler/vllm-cpu:latest
    container_name: vllm-service
    ports:
      - "9009:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 128g
    privileged: true
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HF_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
      VLLM_CPU_KVCACHE_SPACE: 30
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:9009/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
  chatqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
    container_name: chatqna-xeon-backend-server
    depends_on:
      redis-vector-db:
        condition: service_started
      dataprep-redis-service:
        condition: service_healthy
      tei-embedding-service:
        condition: service_started
      retriever:
        condition: service_started
      tei-reranking-service:
        condition: service_started
      vllm-service:
        condition: service_healthy
    ports:
      - "8888:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
      - RETRIEVER_SERVICE_HOST_IP=retriever
      - RERANK_SERVER_HOST_IP=tei-reranking-service
      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
      - LLM_SERVER_HOST_IP=vllm-service
      - LLM_SERVER_PORT=80
      - LLM_MODEL=${LLM_MODEL_ID}
      - LOGFLAG=${LOGFLAG}
    ipc: host
    restart: always
  chatqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
    container_name: chatqna-xeon-ui-server
    depends_on:
      - chatqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
    ipc: host
    restart: always
  chatqna-xeon-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
    container_name: chatqna-xeon-nginx-server
    depends_on:
      - chatqna-xeon-backend-server
      - chatqna-xeon-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=chatqna
      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
      - BACKEND_SERVICE_PORT=8888
      - DATAPREP_SERVICE_IP=dataprep-redis-service
      - DATAPREP_SERVICE_PORT=5000
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
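
A sketch of bringing this stack up. The saved file name, the model IDs, and the request payload are assumptions for illustration, not taken from the commit; note that $host_ip is interpolated into the vllm-service healthcheck when Compose parses the file, so it must be set in the calling shell:

export HF_TOKEN=hf_xxx                           # your Hugging Face token
export host_ip=$(hostname -I | awk '{print $1}')  # substituted into the vllm healthcheck
export INDEX_NAME="rag-redis"
export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
export RERANK_MODEL_ID="BAAI/bge-reranker-base"
export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"

docker compose -f compose_openeuler.yaml up -d    # file name is an assumption

# Once vllm-service reports healthy, the backend answers on port 8888
# (route and payload follow common upstream ChatQnA usage):
curl http://${host_ip}:8888/v1/chatqna \
  -H "Content-Type: application/json" \
  -d '{"messages": "What is openEuler?"}'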

ChatQnA/docker_image_build/build.yaml

Lines changed: 6 additions & 0 deletions
@@ -138,3 +138,9 @@ services:
       dockerfile: comps/third_parties/nginx/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
+  nginx-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/third_parties/nginx/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}-openeuler
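
The new nginx-openeuler target builds like the existing services. A sketch, assuming a GenAIComps checkout inside docker_image_build to satisfy the context: GenAIComps setting above:

cd GenAIExamples/ChatQnA/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git   # provides the build context
TAG=latest docker compose -f build.yaml build nginx-openeuler
# Per the image: line above, this yields opea/nginx:latest-openeuler.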
