7 changes: 7 additions & 0 deletions .github/env/_vllm_versions.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+export VLLM_VER=v0.8.3
+export VLLM_FORK_VER=v0.7.2+Gaudi-1.21.0
+export VLLM_OPENVINO_VER=v0.6.1
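Every consumer updated in this PR uses these pins the same way: source the file, then shallow-clone only the pinned tag. A minimal sketch of that pattern, assuming it runs from inside a checkout of this repository:

    #!/bin/sh
    # Resolve the repo root so the pin file is found from any subdirectory.
    . "$(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh"
    # --depth 1 fetches a single commit, -b selects the pinned tag, and
    # --single-branch skips every other ref, keeping the clone small.
    git clone --depth 1 -b "${VLLM_FORK_VER}" --single-branch \
        https://github.com/HabanaAI/vllm-fork.git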
9 changes: 3 additions & 6 deletions .github/workflows/_comps-workflow.yml
@@ -75,15 +75,12 @@ jobs:
 fi
 
 cd ${{ github.workspace }}
+source ${{ github.workspace }}/.github/env/_vllm_versions.sh
 if [[ $(grep -c "vllm-openvino:" ${docker_compose_yml}) != 0 ]]; then
-git clone https://github.com/vllm-project/vllm.git vllm-openvino
-cd ./vllm-openvino && git checkout v0.6.1 && git rev-parse HEAD && cd ../
+git clone --depth 1 -b ${VLLM_OPENVINO_VER} --single-branch https://github.com/vllm-project/vllm.git
 fi
 if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
-git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git
 fi
 - name: Get build list
 id: get-build-list
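For context, the conditional clones above rely on grep -c, which prints the number of matching lines: a non-zero count means the compose file actually references the image, so the pinned source is fetched only when a service needs it. A standalone sketch of the same gate (the compose path is a hypothetical stand-in):

    docker_compose_yml="docker-compose.yaml"  # hypothetical path for illustration
    # A non-zero match count means the compose file declares a
    # vllm-openvino image, so its source tree must be cloned.
    if [[ $(grep -c "vllm-openvino:" ${docker_compose_yml}) != 0 ]]; then
        echo "compose file references vllm-openvino; cloning pinned source"
    fi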
93 changes: 93 additions & 0 deletions .github/workflows/daily-update-vllm-version.yml
@@ -0,0 +1,93 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+name: Daily update vLLM & vLLM-fork version
+
+on:
+  schedule:
+    - cron: "30 22 * * *"
+  workflow_dispatch:
+
+env:
+  BRANCH_NAME: "update"
+  USER_NAME: "CICD-at-OPEA"
+  USER_EMAIL: "[email protected]"
+
+jobs:
+  freeze-tag:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - repo: vLLM
+            repo_name: vllm-project/vllm
+            ver_name: VLLM_VER
+          - repo: vLLM-fork
+            repo_name: HabanaAI/vllm-fork
+            ver_name: VLLM_FORK_VER
+      fail-fast: false
+    permissions:
+      contents: write
+      pull-requests: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          ref: ${{ github.ref }}
+
+      - name: Set up Git
+        run: |
+          git config --global user.name ${{ env.USER_NAME }}
+          git config --global user.email ${{ env.USER_EMAIL }}
+          git remote set-url origin https://${{ env.USER_NAME }}:"${{ secrets.ACTION_TOKEN }}"@github.com/${{ github.repository }}.git
+          git fetch
+
+          if git ls-remote https://github.com/${{ github.repository }}.git "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}" | grep -q "refs/heads/${{ env.BRANCH_NAME }}_${{ matrix.repo }}"; then
+            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} exists"
+            git checkout ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+          else
echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} not exists"
+            git checkout -b ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+            git push origin ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+            echo "branch ${{ env.BRANCH_NAME }}_${{ matrix.repo }} created successfully"
+          fi
+
+      - name: Run script
+        run: |
+          latest_vllm_ver=$(curl -s "https://api.github.com/repos/${{ matrix.repo_name }}/tags" | jq '.[0].name' -)
+          latest_vllm_ver=$(echo "$latest_vllm_ver" | sed 's/"//g')
+          echo "latest_vllm_ver=${latest_vllm_ver}" >> "$GITHUB_ENV"
+          find . -type f -name "*.sh" -exec sed -i "s/${{ matrix.ver_name }}=.*/${{ matrix.ver_name }}=${latest_vllm_ver}/" {} \;
+
+      - name: Commit changes
+        run: |
+          git add .
+          if git diff-index --quiet HEAD --; then
+            echo "No changes detected, skipping commit."
+            exit 1
+          else
+            git commit -s -m "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
+            git push --set-upstream origin ${{ env.BRANCH_NAME }}_${{ matrix.repo }}
+          fi
+
+      - name: Create Pull Request
+        env:
+          GH_TOKEN: ${{ secrets.ACTION_TOKEN }}
+        run: |
+          pr_url="$(gh pr list --head "${{ env.BRANCH_NAME }}_${{ matrix.repo }}" --state open --json url --jq .[].url)"
+          if [[ -n "${pr_url}" ]]; then
+            echo "Pull Request exists"
+            gh pr edit ${pr_url} \
+              --title "Update ${{ matrix.repo }} version to ${latest_vllm_ver}" \
+              --body "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
+            echo "Pull Request updated successfully"
+          else
echo "Pull Request does not exists..."
+            gh pr create \
+              -B main \
+              -H ${{ env.BRANCH_NAME }}_${{ matrix.repo }} \
+              --title "Update ${{ matrix.repo }} version to ${latest_vllm_ver}" \
+              --body "Update ${{ matrix.repo }} version to ${latest_vllm_ver}"
+            echo "Pull Request created successfully"
+          fi
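One caveat in the update step above: GitHub's /tags API is not guaranteed to be semver-sorted, so .[0].name is a newest-first heuristic rather than a strict latest-release lookup. A local sketch of the same query, using jq -r to strip the JSON quotes instead of the workflow's sed pass:

    #!/bin/sh
    repo_name="vllm-project/vllm"  # one of the two matrix entries
    # Take the first tag GitHub lists; -r emits the raw string without quotes.
    latest_vllm_ver=$(curl -s "https://api.github.com/repos/${repo_name}/tags" | jq -r '.[0].name')
    echo "latest tag: ${latest_vllm_ver}"
    # Rewrite every VLLM_VER pin in shell scripts, as the workflow's find/sed step does.
    find . -type f -name "*.sh" -exec sed -i "s/VLLM_VER=.*/VLLM_VER=${latest_vllm_ver}/" {} \;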
9 changes: 3 additions & 6 deletions .github/workflows/push-image-build.yml
@@ -86,15 +86,12 @@ jobs:
 echo "file_exists=false" >> $GITHUB_ENV
 echo "docker_compose_path=${docker_compose_path} for this service does not exist, so skipping image build for this service!!!"
 fi
+source ${{ github.workspace }}/.github/env/_vllm_versions.sh
 if [[ $(grep -c "vllm-openvino:" ${docker_compose_path}) != 0 ]]; then
-git clone https://github.com/vllm-project/vllm.git vllm-openvino
-cd ./vllm-openvino && git checkout v0.6.1 && git rev-parse HEAD && cd ../
+git clone --depth 1 -b ${VLLM_OPENVINO_VER} --single-branch https://github.com/vllm-project/vllm.git
 fi
 if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
-git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null && cd ../
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git
 fi
 
 - name: Build Image
13 changes: 4 additions & 9 deletions comps/third_parties/vllm/src/build_docker_vllm.sh
@@ -35,19 +35,14 @@ fi
 
 # Build the docker image for vLLM based on the hardware mode
 if [ "$hw_mode" = "hpu" ]; then
-git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
 docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 cd ..
 rm -rf vllm-fork
 else
-git clone https://github.com/vllm-project/vllm.git
-cd ./vllm/
-VLLM_VER="v0.8.3"
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_VER} --single-branch https://github.com/vllm-project/vllm.git && cd vllm
 docker build -f docker/Dockerfile.cpu -t opea/vllm-cpu:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 cd ..
 rm -rf vllm
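A usage sketch for the rewritten build script; the positional hardware-mode argument is an assumption inferred from the $hw_mode check above (the lines that parse it are collapsed out of this diff):

    # Gaudi (HPU) image, built from the pinned VLLM_FORK_VER tag:
    bash comps/third_parties/vllm/src/build_docker_vllm.sh hpu
    # Any other mode falls through to the CPU build at the pinned VLLM_VER:
    bash comps/third_parties/vllm/src/build_docker_vllm.sh cpu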
4 changes: 2 additions & 2 deletions comps/third_parties/vllm/src/build_docker_vllm_openvino.sh
@@ -22,8 +22,8 @@ if [ "$hw_mode" = "gpu" ]; then
 docker build -f Dockerfile.intel_gpu -t opea/vllm-arc:latest . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 else
 BASEDIR="$( cd "$( dirname "$0" )" && pwd )"
-git clone https://github.com/vllm-project/vllm.git vllm
-cd ./vllm/ && git checkout v0.6.1
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_OPENVINO_VER} --single-branch https://github.com/vllm-project/vllm.git && cd vllm
 docker build -t vllm-openvino:latest -f Dockerfile.openvino . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 cd $BASEDIR && rm -rf vllm
 fi
9 changes: 2 additions & 7 deletions tests/agent/build_vllm_gaudi.sh
@@ -5,13 +5,8 @@ function build_vllm_docker_images() {
 echo "Building the vllm docker images"
 cd $WORKDIR
 echo $WORKPATH
-if [ ! -d "./vllm" ]; then
-git clone https://github.com/HabanaAI/vllm-fork.git
-fi
-cd ./vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 if [ $? -ne 0 ]; then
 echo "opea/vllm-gaudi:comps failed"
12 changes: 3 additions & 9 deletions tests/agent/sql_agent_test/test_sql_agent.sh
@@ -102,15 +102,9 @@ function build_docker_images() {
 
 function build_vllm_docker_images() {
 echo "Building the vllm docker images"
-cd $WORKPATH
-echo $WORKPATH
-if [ ! -d "./vllm" ]; then
-git clone https://github.com/HabanaAI/vllm-fork.git
-fi
-cd ./vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+cd $WORKPATH && echo $WORKPATH
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 if [ $? -ne 0 ]; then
 echo "opea/vllm-gaudi:comps failed"
12 changes: 3 additions & 9 deletions tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -53,15 +53,9 @@ function build_docker_images() {
 
 function build_vllm_docker_images() {
 echo "Building the vllm docker images"
-cd $WORKPATH
-echo $WORKPATH
-if [ ! -d "./vllm" ]; then
-git clone https://github.com/HabanaAI/vllm-fork.git
-fi
-cd ./vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+cd $WORKPATH && echo $WORKPATH
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 if [ $? -ne 0 ]; then
 echo "opea/vllm-gaudi:comps failed"
15 changes: 3 additions & 12 deletions tests/dataprep/test_dataprep_redis_finance_on_intel_hpu.sh
@@ -28,18 +28,9 @@ function build_docker_images() {
 
 function build_vllm_docker_images() {
 echo "Building the vllm docker images"
-cd $WORKPATH
-echo $WORKPATH
-if [ ! -d "./vllm-fork" ]; then
-git clone https://github.com/HabanaAI/vllm-fork.git
-fi
-cd ./vllm-fork
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
-
-# cd $WORKDIR/vllm-fork
-
+cd $WORKPATH && echo $WORKPATH
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 if [ $? -ne 0 ]; then
 echo "opea/vllm-gaudi:comps failed"
7 changes: 2 additions & 5 deletions
@@ -11,11 +11,8 @@ export DATA_PATH=${model_cache}
 function build_docker_images() {
 echo "Start building docker images for microservice"
 cd $WORKPATH
-git clone https://github.com/HabanaAI/vllm-fork.git
-cd vllm-fork/
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .
 if [ $? -ne 0 ]; then
 echo "opea/vllm-gaudi built fail"
7 changes: 2 additions & 5 deletions tests/llms/test_llms_doc-summarization_vllm.sh
@@ -21,11 +21,8 @@ service_name="docsum-vllm"
 
 function build_docker_images() {
 cd $WORKPATH
-git clone https://github.com/vllm-project/vllm.git
-cd ./vllm/
-VLLM_VER="v0.8.3"
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_VER} --single-branch https://github.com/vllm-project/vllm.git && cd vllm
 docker build --no-cache -f docker/Dockerfile.cpu -t ${REGISTRY:-opea}/vllm:${TAG:-latest} --shm-size=128g .
 if [ $? -ne 0 ]; then
 echo "opea/vllm built fail"
7 changes: 2 additions & 5 deletions tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
@@ -17,11 +17,8 @@ service_name="docsum-vllm-gaudi"
 
 function build_docker_images() {
 cd $WORKPATH
-git clone https://github.com/HabanaAI/vllm-fork.git
-cd vllm-fork/
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
 if [ $? -ne 0 ]; then
 echo "opea/vllm-gaudi built fail"
7 changes: 2 additions & 5 deletions tests/llms/test_llms_faq-generation_vllm.sh
@@ -18,11 +18,8 @@ service_name="faqgen-vllm"
 
 function build_docker_images() {
 cd $WORKPATH
-git clone https://github.com/vllm-project/vllm.git
-cd ./vllm/
-VLLM_VER="v0.8.3"
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_VER} --single-branch https://github.com/vllm-project/vllm.git && cd vllm
 docker build --no-cache -f docker/Dockerfile.cpu -t ${REGISTRY:-opea}/vllm:${TAG:-latest} --shm-size=128g .
 if [ $? -ne 0 ]; then
 echo "opea/vllm built fail"
7 changes: 2 additions & 5 deletions tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
@@ -17,11 +17,8 @@ service_name="faqgen-vllm-gaudi"
 
 function build_docker_images() {
 cd $WORKPATH
-git clone https://github.com/HabanaAI/vllm-fork.git
-cd vllm-fork/
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
 if [ $? -ne 0 ]; then
 echo "opea/vllm-gaudi built fail"
7 changes: 2 additions & 5 deletions
@@ -17,11 +17,8 @@ service_name="textgen-service-vllm-gaudi"
 
 function build_docker_images() {
 cd $WORKPATH
-git clone https://github.com/HabanaAI/vllm-fork.git
-cd vllm-fork/
-VLLM_VER=v0.6.6.post1+Gaudi-1.20.0
-echo "Check out vLLM tag ${VLLM_VER}"
-git checkout ${VLLM_VER} &> /dev/null
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
 if [ $? -ne 0 ]; then
 echo "opea/vllm-gaudi built fail"
5 changes: 2 additions & 3 deletions tests/lvms/test_lvms_vllm_on_intel_hpu.sh
@@ -18,9 +18,8 @@ function build_docker_images() {
 cd $WORKPATH
 echo $(pwd)
 
-git clone https://github.com/HabanaAI/vllm-fork.git
-cd ./vllm-fork/
-git checkout f78aeb9da0712561163eddd353e3b6097cd69bac # revert this to habana_main when https://github.com/HabanaAI/vllm-fork/issues/1015 is fixed
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_FORK_VER} --single-branch https://github.com/HabanaAI/vllm-fork.git && cd ./vllm-fork
 docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:$TAG --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
 cd ..
 rm -rf vllm-fork
4 changes: 2 additions & 2 deletions tests/third_parties/test_third_parties_vllm_openvino.sh
@@ -17,8 +17,8 @@ service_name="vllm-openvino"
 
 function build_container() {
 cd $WORKPATH
-git clone https://github.com/vllm-project/vllm.git vllm-openvino
-cd ./vllm-openvino/ && git checkout v0.6.1 # something wrong with main branch image build
+source $(git rev-parse --show-toplevel)/.github/env/_vllm_versions.sh
+git clone --depth 1 -b ${VLLM_OPENVINO_VER} --single-branch https://github.com/vllm-project/vllm.git vllm-openvino && cd vllm-openvino
 
 docker build --no-cache -t ${REGISTRY:-opea}/vllm-openvino:${TAG:-latest} \
 -f Dockerfile.openvino \