Commit 616c00c

Update default model cache for new CI cluster (opea-project#1445)

Signed-off-by: chensuyue <[email protected]>

1 parent e73c22a · commit 616c00c

8 files changed: +8 / -8 lines
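
All eight scripts make the same one-line change: the host-side Hugging Face model cache on the new CI cluster lives at /data2/hf_model instead of /data2/cache. In these tests, DATA_PATH is exported before docker compose brings up the service so the compose file can bind-mount it as the serving container's model cache, letting models download once per cluster rather than once per run. A minimal sketch of that pattern, assuming a TGI-style container that reads models from /data (the image tag and mount target are illustrative, not taken from this commit):

# Assumed usage pattern, not part of this commit: DATA_PATH points at a
# shared host directory that is bind-mounted as the container's model cache.
export DATA_PATH="/data2/hf_model"   # shared HF model cache on the new CI cluster
docker run --rm \
  -v "${DATA_PATH}:/data" \          # TGI conventionally stores downloaded models under /data
  ghcr.io/huggingface/text-generation-inference:latest \
  --model-id "Intel/neural-chat-7b-v3-3"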

tests/llms/test_llms_doc-summarization_tgi_on_intel_hpu.sh

Lines changed: 1 addition & 1 deletion

@@ -36,7 +36,7 @@ function start_service() {
 export MAX_INPUT_TOKENS=2048
 export MAX_TOTAL_TOKENS=4096
 export LOGFLAG=True
-export DATA_PATH="/data2/cache"
+export DATA_PATH="/data2/hf_model"

 cd $WORKPATH/comps/llms/deployment/docker_compose
 docker compose -f compose_doc-summarization.yaml up ${service_name} -d > ${LOG_PATH}/start_services_with_compose.log

tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh

Lines changed: 1 addition & 1 deletion

@@ -51,7 +51,7 @@ function start_service() {
 export MAX_TOTAL_TOKENS=4096
 export VLLM_SKIP_WARMUP=true
 export LOGFLAG=True
-export DATA_PATH="/data2/cache"
+export DATA_PATH="/data2/hf_model"

 cd $WORKPATH/comps/llms/deployment/docker_compose
 docker compose -f compose_doc-summarization.yaml up ${service_name} -d > ${LOG_PATH}/start_services_with_compose.log

tests/llms/test_llms_faq-generation_tgi_on_intel_hpu.sh

Lines changed: 1 addition & 1 deletion

@@ -34,7 +34,7 @@ function start_service() {
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
 export LOGFLAG=True
-export DATA_PATH="/data2/cache"
+export DATA_PATH="/data2/hf_model"

 cd $WORKPATH/comps/llms/deployment/docker_compose
 docker compose -f compose_faq-generation.yaml up ${service_name} -d > ${LOG_PATH}/start_services_with_compose.log

tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh

Lines changed: 1 addition & 1 deletion

@@ -50,7 +50,7 @@ function start_service() {
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
 export VLLM_SKIP_WARMUP=true
 export LOGFLAG=True
-export DATA_PATH="/data2/cache"
+export DATA_PATH="/data2/hf_model"

 cd $WORKPATH/comps/llms/deployment/docker_compose
 docker compose -f compose_faq-generation.yaml up ${service_name} -d > ${LOG_PATH}/start_services_with_compose.log

tests/llms/test_llms_text-generation_native_on_intel_hpu.sh

Lines changed: 1 addition & 1 deletion

@@ -31,7 +31,7 @@ function start_service() {
 export host_ip=${host_ip}
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
 export LOGFLAG=True
-export DATA_PATH="/data2/cache"
+export DATA_PATH="/data2/hf_model"

 cd $WORKPATH/comps/llms/deployment/docker_compose
 docker compose -f compose_text-generation.yaml up ${service_name} -d > ${LOG_PATH}/start_services_with_compose.log

tests/llms/test_llms_text-generation_service_tgi_on_intel_hpu.sh

Lines changed: 1 addition & 1 deletion

@@ -34,7 +34,7 @@ function start_service() {
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
 export LOGFLAG=True
-export DATA_PATH="/data2/cache"
+export DATA_PATH="/data2/hf_model"

 cd $WORKPATH/comps/llms/deployment/docker_compose
 docker compose -f compose_text-generation.yaml up ${service_name} -d > ${LOG_PATH}/start_services_with_compose.log

tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh

Lines changed: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ function start_service() {
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
 export VLLM_SKIP_WARMUP=true
 export LOGFLAG=True
-export DATA_PATH="/data2/cache"
+export DATA_PATH="/data2/hf_model"

 cd $WORKPATH/comps/llms/deployment/docker_compose
 docker compose -f compose_text-generation.yaml up ${service_name} -d > ${LOG_PATH}/start_services_with_compose.log

tests/retrievers/test_retrievers_neo4j_on_intel_hpu.sh

Lines changed: 1 addition & 1 deletion

@@ -43,7 +43,7 @@ function start_service() {
 export LLM_ENDPOINT_PORT=11634
 export RETRIEVER_PORT=11635
 export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
-export DATA_PATH="/data2/cache"
+export DATA_PATH="/data2/hf_model"
 export MAX_INPUT_TOKENS=4096
 export MAX_TOTAL_TOKENS=8192
 export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:${TEI_EMBEDDER_PORT}"
