diff --git a/.github/env/_vllm_versions.sh b/.github/env/_vllm_versions.sh
new file mode 100644
index 0000000000..375afa99a8
--- /dev/null
+++ b/.github/env/_vllm_versions.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+export VLLM_VER=v0.8.3
+export VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
+export VLLM_OPENVINO_VER=v0.6.1
diff --git a/.github/workflows/_comps-workflow.yml b/.github/workflows/_comps-workflow.yml
index 903be7cf0c..45fbac8902 100644
--- a/.github/workflows/_comps-workflow.yml
+++ b/.github/workflows/_comps-workflow.yml
@@ -75,15 +75,12 @@ jobs:
           fi
           cd ${{ github.workspace }}
+          source ${{ github.workspace }}/.github/env/_vllm_versions.sh
           if [[ $(grep -c "vllm-openvino:" ${docker_compose_yml}) != 0 ]]; then
-            git clone https://github.com/vllm-project/vllm.git vllm-openvino
-            cd ./vllm-openvino && git checkout v0.6.1 && git rev-parse HEAD && cd ../
+            git clone --depth 1 -b ${VLLM_OPENVINO_VER} --single-branch https://github.com/vllm-project/vllm.git
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
-            git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-            VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-            echo "Check out vLLM tag ${VLLM_VER}"
-            git checkout ${VLLM_VER} &> /dev/null && cd ../
+            git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git
           fi
 
       - name: Get build list
         id: get-build-list
diff --git a/.github/workflows/daily-update-vllm-version.yml b/.github/workflows/daily-update-vllm-version.yml
new file mode 100644
index 0000000000..991d4b8db2
--- /dev/null
+++ b/.github/workflows/daily-update-vllm-version.yml
@@ -0,0 +1,93 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+name: Daily update vLLM & vLLM-fork version
+
+on:
+  schedule:
+    - cron: "30 22 * * *"
+  workflow_dispatch:
+
+env:
+  BRANCH_NAME: "update"
+  USER_NAME: "CICD-at-OPEA"
+  USER_EMAIL: "CICD@opea.dev"
+
+jobs:
+  freeze-tag:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - repo: vLLM
+            repo_name: vllm-project/vllm
+            ver_name: VLLM_VER
+          - repo: vLLM-fork
+            repo_name: HabanaAI/vllm-fork
+            ver_name: VLLM_FORK_VER
+      fail-fast: false
+    permissions:
+      contents: write
+      pull-requests: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          ref: ${{ github.ref }}
+
+      - name: Set up Git
+        run: |
+          git config --global user.name ${{ env.USER_NAME }}
+          git config --global user.email ${{ env.USER_EMAIL }}
+          git remote set-url origin https://${{ env.USER_NAME }}:"${{ secrets.ACTION_TOKEN }}"@github.com/${{ github.repository }}.git
+          git fetch
+
+          if git ls-remote https://github.com/${{ github.repository }}.git "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | grep -q "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}"; then
+            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} exists"
+            git checkout ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+          else
+            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} does not exist"
+            git checkout -b ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+            git push origin ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} created successfully"
+          fi
+
+      - name: Run script
+        run: |
+          latest_vllm_ver=$(curl -s "https://api.github.com/repos/${{ matrix.repo_name }}/tags" | jq '.[0].name' -)
+          latest_vllm_ver=$(echo "$latest_vllm_ver" | sed 's/"//g')
+          echo "latest_vllm_ver=${latest_vllm_ver}" >> "$GITHUB_ENV"
+          find . -type f -name "*.sh" -exec sed -i "s/${{ matrix.ver_name }}=.*/${{ matrix.ver_name }}=${latest_vllm_ver}/" {} \;
+
+      - name: Commit changes
+        run: |
+          git add .
+          if git diff-index --quiet HEAD --; then
+            echo "No changes detected, skipping commit."
+            exit 1
+          else
+            git commit -s -m "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
+            git push --set-upstream origin ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+          fi
+
+      - name: Create Pull Request
+        env:
+          GH_TOKEN: ${{ secrets.ACTION_TOKEN }}
+        run: |
+          pr_url="$(gh pr list --head "${{ env.BRANCH_NAME }}_${{ matrix.repo }}" --state open --json url --jq .[].url)"
+          if [[ -n "${pr_url}" ]]; then
+            echo "Pull Request exists"
+            gh pr edit ${pr_url} \
+              --title "Update ${{ matrix.repo }} version to ${latest_vllm_ver}" \
+              --body "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
+            echo "Pull Request updated successfully"
+          else
+            echo "Pull Request does not exist..."
+            gh pr create \
+              -B main \
+              -H ${{ env.BRANCH_NAME }}_${{ matrix.repo }} \
+              --title "Update ${{ matrix.repo }} version to ${latest_vllm_ver}" \
+              --body "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
+            echo "Pull Request created successfully"
+          fi
diff --git a/.github/workflows/push-image-build.yml b/.github/workflows/push-image-build.yml
index ad14e67840..aaae175ee9 100644
--- a/.github/workflows/push-image-build.yml
+++ b/.github/workflows/push-image-build.yml
@@ -86,15 +86,12 @@ jobs:
             echo "file_exists=false" >> $GITHUB_ENV
             echo "docker_compose_path=${docker_compose_path} for this service does not exist, so skipping image build for this service!!!"
           fi
+          source ${{ github.workspace }}/.github/env/_vllm_versions.sh
           if [[ $(grep -c "vllm-openvino:" ${docker_compose_path}) != 0 ]]; then
-            git clone https://github.com/vllm-project/vllm.git vllm-openvino
-            cd ./vllm-openvino && git checkout v0.6.1 && git rev-parse HEAD && cd ../
+            git clone --depth 1 -b ${VLLM_OPENVINO_VER} --single-branch https://github.com/vllm-project/vllm.git
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
-            git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-            VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-            echo "Check out vLLM tag ${VLLM_VER}"
-            git checkout ${VLLM_VER} &> /dev/null && cd ../
+            git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git
           fi
 
       - name: Build Image
diff --git a/comps/third_parties/vllm/src/build_docker_vllm.sh b/comps/third_parties/vllm/src/build_docker_vllm.sh
index c5075303b2..ffb0999abd 100644
--- a/comps/third_parties/vllm/src/build_docker_vllm.sh
+++ b/comps/third_parties/vllm/src/build_docker_vllm.sh
@@ -35,19 +35,14 @@ fi
 
 # Build the docker image for vLLM based on the hardware mode
 if [ "$hw_mode" = "hpu" ]; then
-    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
     docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd ..
     rm -rf vllm-fork
 else
-    git clone https://github.com/vllm-project/vllm.git
-    cd ./vllm/
-    VLLM_VER="v0.8.3"
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_VER} --single-branch https://github.com/vllm-project/vllm.git && cd vllm
     docker build -f docker/Dockerfile.cpu -t opea/vllm-cpu:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd ..
     rm -rf vllm
diff --git a/comps/third_parties/vllm/src/build_docker_vllm_openvino.sh b/comps/third_parties/vllm/src/build_docker_vllm_openvino.sh
index c7ca87cacc..0fef7a6f6c 100644
--- a/comps/third_parties/vllm/src/build_docker_vllm_openvino.sh
+++ b/comps/third_parties/vllm/src/build_docker_vllm_openvino.sh
@@ -22,8 +22,8 @@ if [ "$hw_mode" = "gpu" ]; then
     docker build -f Dockerfile.intel_gpu -t opea/vllm-arc:latest . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 else
     BASEDIR="$( cd "$( dirname "$0" )" && pwd )"
-    git clone https://github.com/vllm-project/vllm.git vllm
-    cd ./vllm/ && git checkout v0.6.1
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_OPENVINO_VER} --single-branch https://github.com/vllm-project/vllm.git && cd vllm
     docker build -t vllm-openvino:latest -f Dockerfile.openvino . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd $BASEDIR && rm -rf vllm
 fi
diff --git a/tests/agent/build_vllm_gaudi.sh b/tests/agent/build_vllm_gaudi.sh
index 71b24dc0f3..19cab550a3 100644
--- a/tests/agent/build_vllm_gaudi.sh
+++ b/tests/agent/build_vllm_gaudi.sh
@@ -5,13 +5,8 @@ function build_vllm_docker_images() {
     echo "Building the vllm docker images"
     cd $WORKDIR
     echo $WORKPATH
-    if [ ! -d "./vllm" ]; then
-        git clone https://github.com/HabanaAI/vllm-fork.git
-    fi
-    cd ./vllm-fork
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi:comps failed"
diff --git a/tests/agent/sql_agent_test/test_sql_agent.sh b/tests/agent/sql_agent_test/test_sql_agent.sh
index 9ace9025e1..cb9e3087a4 100644
--- a/tests/agent/sql_agent_test/test_sql_agent.sh
+++ b/tests/agent/sql_agent_test/test_sql_agent.sh
@@ -102,15 +102,9 @@ function build_docker_images() {
 
 function build_vllm_docker_images() {
     echo "Building the vllm docker images"
-    cd $WORKPATH
-    echo $WORKPATH
-    if [ ! -d "./vllm" ]; then
-        git clone https://github.com/HabanaAI/vllm-fork.git
-    fi
-    cd ./vllm-fork
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    cd $WORKPATH && echo $WORKPATH
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi:comps failed"
diff --git a/tests/agent/test_agent_langchain_on_intel_hpu.sh b/tests/agent/test_agent_langchain_on_intel_hpu.sh
index a60aff2c47..50d0ab3e9c 100644
--- a/tests/agent/test_agent_langchain_on_intel_hpu.sh
+++ b/tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -53,15 +53,9 @@ function build_docker_images() {
 
 function build_vllm_docker_images() {
     echo "Building the vllm docker images"
-    cd $WORKPATH
-    echo $WORKPATH
-    if [ ! -d "./vllm" ]; then
-        git clone https://github.com/HabanaAI/vllm-fork.git
-    fi
-    cd ./vllm-fork
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    cd $WORKPATH && echo $WORKPATH
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi:comps failed"
diff --git a/tests/dataprep/test_dataprep_redis_finance_on_intel_hpu.sh b/tests/dataprep/test_dataprep_redis_finance_on_intel_hpu.sh
index 8d659989d4..070d227f61 100644
--- a/tests/dataprep/test_dataprep_redis_finance_on_intel_hpu.sh
+++ b/tests/dataprep/test_dataprep_redis_finance_on_intel_hpu.sh
@@ -28,18 +28,9 @@ function build_docker_images() {
 
 function build_vllm_docker_images() {
     echo "Building the vllm docker images"
-    cd $WORKPATH
-    echo $WORKPATH
-    if [ ! -d "./vllm-fork" ]; then
-        git clone https://github.com/HabanaAI/vllm-fork.git
-    fi
-    cd ./vllm-fork
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
-
-    # cd $WORKDIR/vllm-fork
-
+    cd $WORKPATH && echo $WORKPATH
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi:comps failed"
diff --git a/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh b/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
index 34abccf7a1..b8c8cc44e7 100644
--- a/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
+++ b/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
@@ -11,11 +11,8 @@ export DATA_PATH=${model_cache}
 
 function build_docker_images() {
     echo "Start building docker images for microservice"
     cd $WORKPATH
-    git clone https://github.com/HabanaAI/vllm-fork.git
-    cd vllm-fork/
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_doc-summarization_vllm.sh b/tests/llms/test_llms_doc-summarization_vllm.sh
index b4ccde85ec..60fcf1eaeb 100644
--- a/tests/llms/test_llms_doc-summarization_vllm.sh
+++ b/tests/llms/test_llms_doc-summarization_vllm.sh
@@ -21,11 +21,8 @@ service_name="docsum-vllm"
 
 function build_docker_images() {
     cd $WORKPATH
-    git clone https://github.com/vllm-project/vllm.git
-    cd ./vllm/
-    VLLM_VER="v0.8.3"
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_VER} --single-branch https://github.com/vllm-project/vllm.git && cd vllm
     docker build --no-cache -f docker/Dockerfile.cpu -t ${REGISTRY:-opea}/vllm:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm built fail"
diff --git a/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh b/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
index 11237fcd16..0e88cdfd94 100644
--- a/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
@@ -17,11 +17,8 @@ service_name="docsum-vllm-gaudi"
 
 function build_docker_images() {
     cd $WORKPATH
-    git clone https://github.com/HabanaAI/vllm-fork.git
-    cd vllm-fork/
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_faq-generation_vllm.sh b/tests/llms/test_llms_faq-generation_vllm.sh
index 04ca9570aa..30d0f9b30d 100644
--- a/tests/llms/test_llms_faq-generation_vllm.sh
+++ b/tests/llms/test_llms_faq-generation_vllm.sh
@@ -18,11 +18,8 @@ service_name="faqgen-vllm"
 
 function build_docker_images() {
     cd $WORKPATH
-    git clone https://github.com/vllm-project/vllm.git
-    cd ./vllm/
-    VLLM_VER="v0.8.3"
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_VER} --single-branch https://github.com/vllm-project/vllm.git && cd vllm
     docker build --no-cache -f docker/Dockerfile.cpu -t ${REGISTRY:-opea}/vllm:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm built fail"
diff --git a/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh b/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
index c875fa70ae..8cf2189c4f 100644
--- a/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
@@ -17,11 +17,8 @@ service_name="faqgen-vllm-gaudi"
 
 function build_docker_images() {
     cd $WORKPATH
-    git clone https://github.com/HabanaAI/vllm-fork.git
-    cd vllm-fork/
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh b/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
index 390c45e42a..b14550da7f 100644
--- a/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
@@ -17,11 +17,8 @@ service_name="textgen-service-vllm-gaudi"
 
 function build_docker_images() {
     cd $WORKPATH
-    git clone https://github.com/HabanaAI/vllm-fork.git
-    cd vllm-fork/
-    VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_VER}"
-    git checkout ${VLLM_VER} &> /dev/null
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/lvms/test_lvms_vllm_on_intel_hpu.sh b/tests/lvms/test_lvms_vllm_on_intel_hpu.sh
index 3475f58ac2..de88d32141 100644
--- a/tests/lvms/test_lvms_vllm_on_intel_hpu.sh
+++ b/tests/lvms/test_lvms_vllm_on_intel_hpu.sh
@@ -18,9 +18,8 @@ function build_docker_images() {
 
     cd $WORKPATH
     echo $(pwd)
-    git clone https://github.com/HabanaAI/vllm-fork.git
-    cd ./vllm-fork/
-    git checkout f78aeb9da0712561163eddd353e3b6097cd69bac # revert this to habana_main when https://github.com/HabanaAI/vllm-fork/issues/1015 is fixed
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:$TAG --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd ..
     rm -rf vllm-fork
diff --git a/tests/third_parties/test_third_parties_vllm_openvino.sh b/tests/third_parties/test_third_parties_vllm_openvino.sh
index 23fe22509c..e1c1b215f9 100644
--- a/tests/third_parties/test_third_parties_vllm_openvino.sh
+++ b/tests/third_parties/test_third_parties_vllm_openvino.sh
@@ -17,8 +17,8 @@ service_name="vllm-openvino"
 
 function build_container() {
     cd $WORKPATH
-    git clone https://github.com/vllm-project/vllm.git vllm-openvino
-    cd ./vllm-openvino/ && git checkout v0.6.1 # something wrong with main branch image build
+    source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+    git clone --depth 1 -b ${VLLM_OPENVINO_VER} --single-branch https://github.com/vllm-project/vllm.git vllm-openvino && cd vllm-openvino
     docker build --no-cache -t ${REGISTRY:-opea}/vllm-openvino:${TAG:-latest} \
        -f Dockerfile.openvino \
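
Usage note: every build script in this change converges on the same two-step pattern — source the pinned tags from .github/env/_vllm_versions.sh at the repository root, then shallow-clone exactly that tag instead of cloning full history and checking out afterwards. A minimal standalone sketch of the pattern (the image tag below is illustrative, not taken from the patch):

    #!/bin/bash
    set -e
    # Resolve the repo root so the version file is found from any working directory.
    source "$(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh"
    # --depth 1 -b <tag> --single-branch fetches only the pinned tag's tip,
    # replacing the old clone-then-checkout flow.
    git clone --depth 1 -b "${VLLM_FORK_VER}" --single-branch \
        https://github.com/HabanaAI/vllm-fork.git
    cd vllm-fork
    docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g .

Because the tags live in one sourced file, the daily-update workflow's sed rewrite of every *.sh file is all that is needed to bump the pinned versions repository-wide.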