
Commit 9a6ed30 (parent: 83733af)

add test_compose_openeuler_on_xeon.sh

Signed-off-by: zhihang <[email protected]>

File tree: 3 files changed, +260 -7 lines

ChatQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml

Lines changed: 8 additions & 7 deletions
@@ -14,7 +14,7 @@ services:
       timeout: 3s
       retries: 10
   dataprep-redis-service:
-    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler
+    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
     container_name: dataprep-redis-server
     depends_on:
       redis-vector-db:
@@ -52,7 +52,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   retriever:
-    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler
+    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -88,29 +88,30 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   vllm-service:
-    image: openeuler/vllm-cpu:0.8.5-oe2403lts
+    image: openeuler/vllm-cpu:latest
     container_name: vllm-service
     ports:
       - "9009:80"
     volumes:
       - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
     shm_size: 128g
+    privileged: true
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
       HF_TOKEN: ${HF_TOKEN}
       LLM_MODEL_ID: ${LLM_MODEL_ID}
       VLLM_TORCH_PROFILER_DIR: "/mnt"
-      VLLM_CPU_KVCACHE_SPACE: 50
+      VLLM_CPU_KVCACHE_SPACE: 30
     healthcheck:
       test: ["CMD-SHELL", "curl -f http://$host_ip:9009/health || exit 1"]
       interval: 10s
       timeout: 10s
       retries: 100
     command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
   chatqna-xeon-backend-server:
-    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}-openeuler
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
     container_name: chatqna-xeon-backend-server
     depends_on:
       redis-vector-db:
@@ -144,7 +145,7 @@ services:
     ipc: host
     restart: always
   chatqna-xeon-ui-server:
-    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}-openeuler
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
     container_name: chatqna-xeon-ui-server
     depends_on:
       - chatqna-xeon-backend-server
@@ -157,7 +158,7 @@ services:
     ipc: host
     restart: always
   chatqna-xeon-nginx-server:
-    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}-openeuler
+    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
     container_name: chatqna-xeon-nginx-server
    depends_on:
       - chatqna-xeon-backend-server
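With this change the openEuler compose file consumes the same REGISTRY/TAG variables as the stock compose file, and the openEuler distinction moves into the tag itself (the vLLM image additionally switches to openeuler/vllm-cpu:latest and gains privileged mode). A minimal deployment sketch, following the conventions of the test script added below (set_env.sh and the compose path are taken from that script; the tag value shown is illustrative):

    export REGISTRY=opea
    export TAG=latest-openeuler
    cd ChatQnA/docker_compose/intel/cpu/xeon
    source set_env.sh   # environment the compose file expects (model IDs, proxies, HF_TOKEN, ...)
    docker compose -f compose_openeuler.yaml up -d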

ChatQnA/docker_image_build/build.yaml

Lines changed: 54 additions & 0 deletions
@@ -25,24 +25,60 @@ services:
       dockerfile: ./docker/Dockerfile.react
     extends: chatqna
     image: ${REGISTRY:-opea}/chatqna-conversation-ui:${TAG:-latest}
+  chatqna-openeuler:
+    build:
+      context: ../
+      dockerfile: ./Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
+  chatqna-ui-openeuler:
+    build:
+      context: ../ui
+      dockerfile: ./docker/Dockerfile.openEuler
+    extends: chatqna-ui
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}
+  chatqna-conversation-ui-openeuler:
+    build:
+      context: ../ui
+      dockerfile: ./docker/Dockerfile.react.openEuler
+    extends: chatqna-conversation-ui
+    image: ${REGISTRY:-opea}/chatqna-conversation-ui:${TAG:-latest}
   embedding:
     build:
       context: GenAIComps
       dockerfile: comps/embeddings/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+  embedding-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/embeddings/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
   retriever:
     build:
       context: GenAIComps
       dockerfile: comps/retrievers/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
+  retriever-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/retrievers/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
   reranking:
     build:
       context: GenAIComps
       dockerfile: comps/rerankings/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/reranking:${TAG:-latest}
+  reranking-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/rerankings/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/reranking:${TAG:-latest}
   llm-textgen:
     build:
       context: GenAIComps
@@ -61,12 +97,24 @@ services:
       dockerfile: comps/dataprep/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
+  dataprep-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/dataprep/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
   guardrails:
     build:
       context: GenAIComps
       dockerfile: comps/guardrails/src/guardrails/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/guardrails:${TAG:-latest}
+  guardrails-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/guardrails/src/guardrails/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/guardrails:${TAG:-latest}
   vllm-rocm:
     build:
       context: GenAIComps
@@ -90,3 +138,9 @@ services:
       dockerfile: comps/third_parties/nginx/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
+  nginx-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/third_parties/nginx/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
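Each *-openeuler target reuses its stock service via extends and publishes the same image name, swapping only the Dockerfile. A sketch of building the subset the new test exercises (the clone step and service list are taken from the script below; branch and proxy settings depend on your environment):

    cd ChatQnA/docker_image_build
    git clone --depth 1 --branch main https://github.com/opea-project/GenAIComps.git
    service_list="chatqna-openeuler chatqna-ui-openeuler dataprep-openeuler retriever-openeuler nginx-openeuler"
    docker compose -f build.yaml build ${service_list} --no-cache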
ChatQnA/tests/test_compose_openeuler_on_xeon.sh (new file)

Lines changed: 198 additions & 0 deletions

@@ -0,0 +1,198 @@
#!/bin/bash
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}-openeuler
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
export MODEL_CACHE=${model_cache:-"./data"}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
    opea_branch=${opea_branch:-"main"}
    cd $WORKPATH/docker_image_build
    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
    pushd GenAIComps
    echo "GenAIComps test commit is $(git rev-parse HEAD)"
    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.openEuler .
    popd && sleep 1s

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="chatqna-openeuler chatqna-ui-openeuler dataprep-openeuler retriever-openeuler nginx-openeuler"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker images && sleep 1s
}

function start_services() {
    cd $WORKPATH/docker_compose/intel/cpu/xeon

    source set_env.sh

    # Start Docker Containers
    docker compose -f compose_openeuler.yaml -f compose.telemetry.yaml up -d --quiet-pull > ${LOG_PATH}/start_services_with_compose.log
    n=0
    until [[ "$n" -ge 100 ]]; do
        docker logs vllm-service > ${LOG_PATH}/vllm_service_start.log 2>&1
        if grep -q complete ${LOG_PATH}/vllm_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done
}

function validate_service() {
    local URL="$1"
    local EXPECTED_RESULT="$2"
    local SERVICE_NAME="$3"
    local DOCKER_NAME="$4"
    local INPUT_DATA="$5"

    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
    if [ "$HTTP_STATUS" -eq 200 ]; then
        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."

        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log)

        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
            echo "[ $SERVICE_NAME ] Content is as expected."
        else
            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
            docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
            exit 1
        fi
    else
        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
        docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log
        exit 1
    fi
    sleep 1s
}

function validate_microservices() {
    # Check if the microservices are running correctly.
    sleep 3m

    # tei for embedding service
    validate_service \
        "${ip_address}:6006/embed" \
        "\[\[" \
        "tei-embedding" \
        "tei-embedding-server" \
        '{"inputs":"What is Deep Learning?"}'

    # retrieval microservice
    test_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
    validate_service \
        "${ip_address}:7000/v1/retrieval" \
        " " \
        "retrieval" \
        "retriever-redis-server" \
        "{\"text\":\"What is the revenue of Nike in 2023?\",\"embedding\":${test_embedding}}"

    # tei for rerank microservice
    validate_service \
        "${ip_address}:8808/rerank" \
        '{"index":1,"score":' \
        "tei-rerank" \
        "tei-reranking-server" \
        '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}'

    # vllm for llm service
    validate_service \
        "${ip_address}:9009/v1/chat/completions" \
        "content" \
        "vllm-llm" \
        "vllm-service" \
        '{"model": "Qwen/Qwen3-4B", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 17}'
}

function validate_megaservice() {
    # Curl the Mega Service
    validate_service \
        "${ip_address}:8888/v1/chatqna" \
        "Nike" \
        "mega-chatqna" \
        "chatqna-xeon-backend-server" \
        '{"messages": "What is the revenue of Nike in 2023?"}'

}

function validate_frontend() {
    cd $WORKPATH/ui/svelte

    # use the official Playwright image to run the tests
    docker run --rm -it \
        -v $(pwd):/workspace \
        -w /workspace \
        -e CI=true \
        -e PLAYWRIGHT_BROWSERS_PATH=/ms-playwright \
        mcr.microsoft.com/playwright:v1.37.0-focal \
        /bin/bash -c "
            apt-get update && \
            apt-get install -y curl && \
            curl -fsSL https://deb.nodesource.com/setup_18.x | bash - && \
            apt-get install -y nodejs && \
            node -v && npm -v && \
            npm install && npm ci && npx playwright install --with-deps &&
            sed -i 's/localhost/$ip_address/g' playwright.config.ts &&
            npx playwright test
        "

    if [ $? -ne 0 ]; then
        echo "[TEST INFO]: ---------frontend test failed---------"
        exit 1
    else
        echo "[TEST INFO]: ---------frontend test passed---------"
    fi
}

function stop_docker() {
    cd $WORKPATH/docker_compose/intel/cpu/xeon
    docker compose -f compose_openeuler.yaml -f compose.telemetry.yaml down
}

function main() {

    echo "::group::stop_docker"
    stop_docker
    echo "::endgroup::"

    echo "::group::build_docker_images"
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    echo "::endgroup::"

    echo "::group::start_services"
    start_services
    echo "::endgroup::"

    echo "::group::validate_microservices"
    validate_microservices
    echo "::endgroup::"

    echo "::group::validate_megaservice"
    validate_megaservice
    echo "::endgroup::"

    echo "::group::validate_frontend"
    validate_frontend
    echo "::endgroup::"

    echo "::group::stop_docker"
    stop_docker
    echo "::endgroup::"

    docker system prune -f

}

main
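To invoke the test the way CI would (the variable names come from the script itself; the values shown are illustrative, and the script appends -openeuler to IMAGE_TAG on its own):

    cd ChatQnA/tests          # assumed location of the new script
    export IMAGE_REPO=opea    # "opea" also triggers the local image build
    export IMAGE_TAG=latest   # becomes latest-openeuler inside the script
    bash test_compose_openeuler_on_xeon.sh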
