4 changes: 0 additions & 4 deletions .github/workflows/docker/compose/llms-compose-cd.yaml
@@ -23,10 +23,6 @@ services:
build:
dockerfile: comps/llms/text-generation/vllm/llama_index/Dockerfile
image: ${REGISTRY:-opea}/llm-vllm-llamaindex:${TAG:-latest}
- llm-vllm-llamaindex-hpu:
- build:
- dockerfile: comps/llms/text-generation/vllm/llama_index/dependency/Dockerfile.intel_hpu
- image: ${REGISTRY:-opea}/llm-vllm-llamaindex-hpu:${TAG:-latest}
llm-predictionguard:
build:
dockerfile: comps/llms/text-generation/predictionguard/Dockerfile
4 changes: 0 additions & 4 deletions .github/workflows/docker/compose/llms-compose.yaml
@@ -24,10 +24,6 @@ services:
build:
dockerfile: comps/llms/text-generation/vllm/langchain/Dockerfile
image: ${REGISTRY:-opea}/llm-vllm:${TAG:-latest}
- llm-vllm-hpu:
- build:
- dockerfile: comps/llms/text-generation/vllm/langchain/dependency/Dockerfile.intel_hpu
- image: ${REGISTRY:-opea}/llm-vllm-hpu:${TAG:-latest}
llm-vllm-ray:
build:
dockerfile: comps/llms/text-generation/vllm/ray/Dockerfile
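These two compose files under .github/workflows/docker/compose drive the CI image builds, so dropping the llm-vllm-hpu and llm-vllm-llamaindex-hpu entries means CI stops building the Gaudi images from the now-deleted dependency Dockerfiles. As an aside (not part of the diff), one of the remaining services can still be built locally from the same file; the service key llm-vllm is inferred from the image name above, and the rest of the invocation is an illustrative assumption:

    # Hedged sketch: build one remaining service from the CI compose file.
    # The service key "llm-vllm" is inferred, not shown verbatim in this hunk.
    REGISTRY=opea TAG=latest docker compose \
      -f .github/workflows/docker/compose/llms-compose.yaml build llm-vllm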

This file was deleted.

@@ -30,7 +30,11 @@ fi

# Build the docker image for vLLM based on the hardware mode
if [ "$hw_mode" = "hpu" ]; then
- docker build -f Dockerfile.intel_hpu -t opea/vllm:hpu --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
+ git clone https://github.com/HabanaAI/vllm-fork.git
+ cd ./vllm-fork/
+ docker build -f Dockerfile.hpu -t opea/vllm:hpu --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
+ cd ..
+ rm -rf vllm-fork
else
git clone https://github.com/vllm-project/vllm.git
cd ./vllm/
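One note on the new HPU branch (it applies to the llama_index variant further down as well): it clones HabanaAI/vllm-fork at the tip of its default branch, so the resulting image can drift between runs. If a reproducible build matters, the clone can be pinned to a fixed ref before building; this is only a sketch, and the ref below is a placeholder rather than anything specified by this PR:

    # Hedged variant of the HPU branch above, with the fork pinned to a known ref.
    git clone https://github.com/HabanaAI/vllm-fork.git
    cd ./vllm-fork/
    git checkout <known-good-tag-or-commit>   # placeholder; substitute a real ref
    docker build -f Dockerfile.hpu -t opea/vllm:hpu --shm-size=128g . \
      --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
    cd .. && rm -rf vllm-fork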
@@ -38,7 +38,7 @@ volume=$PWD/data

# Build the Docker run command based on hardware mode
if [ "$hw_mode" = "hpu" ]; then
- docker run -d --rm --runtime=habana --name="vllm-service" -p $port_number:80 -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host -e HTTPS_PROXY=$https_proxy -e HTTP_PROXY=$https_proxy -e HF_TOKEN=${HF_TOKEN} opea/vllm:hpu /bin/bash -c "export VLLM_CPU_KVCACHE_SPACE=40 && python3 -m vllm.entrypoints.openai.api_server --enforce-eager --model $model_name --tensor-parallel-size $parallel_number --host 0.0.0.0 --port 80 --block-size $block_size --max-num-seqs $max_num_seqs --max-seq_len-to-capture $max_seq_len_to_capture "
+ docker run -d --rm --runtime=habana --name="vllm-service" -p $port_number:80 -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host -e HTTPS_PROXY=$https_proxy -e HTTP_PROXY=$https_proxy -e HF_TOKEN=${HF_TOKEN} opea/vllm:hpu --enforce-eager --model $model_name --tensor-parallel-size $parallel_number --host 0.0.0.0 --port 80 --block-size $block_size --max-num-seqs $max_num_seqs --max-seq_len-to-capture $max_seq_len_to_capture
else
docker run -d --rm --name="vllm-service" -p $port_number:80 --network=host -v $volume:/data -e HTTPS_PROXY=$https_proxy -e HTTP_PROXY=$https_proxy -e HF_TOKEN=${HF_TOKEN} -e VLLM_CPU_KVCACHE_SPACE=40 opea/vllm:cpu --model $model_name --host 0.0.0.0 --port 80
fi
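With this change the /bin/bash -c wrapper (and the explicit python3 -m vllm.entrypoints.openai.api_server call inside it) is gone, so the container passes only CLI flags and relies on the entrypoint baked into the image built from Dockerfile.hpu to start the OpenAI-compatible server; that reading of the entrypoint is an assumption, as the diff itself does not show it. A quick smoke test once the container is up might look like the following, where the port stands in for $port_number and the model name is purely illustrative:

    # Hedged smoke test of vLLM's OpenAI-compatible completions endpoint.
    # 8008 and the model name below are placeholders, not values from this PR.
    curl -s http://localhost:8008/v1/completions \
      -H "Content-Type: application/json" \
      -d '{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "prompt": "What is deep learning?", "max_tokens": 32}'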
@@ -23,7 +23,7 @@ services:
cap_add:
- SYS_NICE
ipc: host
- command: /bin/bash -c "export VLLM_CPU_KVCACHE_SPACE=40 && python3 -m vllm.entrypoints.openai.api_server --enforce-eager --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80"
+ command: --enforce-eager --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80
llm:
image: opea/llm-vllm:latest
container_name: llm-vllm-gaudi-server

This file was deleted.

@@ -30,7 +30,11 @@ fi

# Build the docker image for vLLM based on the hardware mode
if [ "$hw_mode" = "hpu" ]; then
- docker build -f docker/Dockerfile.intel_hpu -t opea/vllm:hpu --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
+ git clone https://github.com/HabanaAI/vllm-fork.git
+ cd ./vllm-fork/
+ docker build -f Dockerfile.hpu -t opea/vllm:hpu --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
+ cd ..
+ rm -rf vllm-fork
else
git clone https://github.com/vllm-project/vllm.git
cd ./vllm/
@@ -38,7 +38,7 @@ volume=$PWD/data

# Build the Docker run command based on hardware mode
if [ "$hw_mode" = "hpu" ]; then
- docker run -d --rm --runtime=habana --name="vllm-service" -p $port_number:80 -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host -e HTTPS_PROXY=$https_proxy -e HTTP_PROXY=$https_proxy -e HF_TOKEN=${HUGGINGFACEHUB_API_TOKEN} opea/vllm:hpu /bin/bash -c "export VLLM_CPU_KVCACHE_SPACE=40 && python3 -m vllm.entrypoints.openai.api_server --enforce-eager --model $model_name --tensor-parallel-size $parallel_number --host 0.0.0.0 --port 80 --block-size $block_size --max-num-seqs $max_num_seqs --max-seq_len-to-capture $max_seq_len_to_capture "
+ docker run -d --rm --runtime=habana --name="vllm-service" -p $port_number:80 -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host -e HTTPS_PROXY=$https_proxy -e HTTP_PROXY=$https_proxy -e HF_TOKEN=${HUGGINGFACEHUB_API_TOKEN} opea/vllm:hpu --enforce-eager --model $model_name --tensor-parallel-size $parallel_number --host 0.0.0.0 --port 80 --block-size $block_size --max-num-seqs $max_num_seqs --max-seq_len-to-capture $max_seq_len_to_capture
else
docker run -d --rm --name="vllm-service" -p $port_number:80 --network=host -v $volume:/data -e HTTPS_PROXY=$https_proxy -e HTTP_PROXY=$https_proxy -e HF_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e VLLM_CPU_KVCACHE_SPACE=40 opea/vllm:cpu --model $model_name --host 0.0.0.0 --port 80
fi
@@ -23,7 +23,7 @@ services:
cap_add:
- SYS_NICE
ipc: host
- command: /bin/bash -c "export VLLM_CPU_KVCACHE_SPACE=40 && python3 -m vllm.entrypoints.openai.api_server --enforce-eager --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80"
+ command: --enforce-eager --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80
llm:
image: opea/llm-vllm-llamaindex:latest
container_name: llm-vllm-gaudi-server
3 changes: 2 additions & 1 deletion comps/llms/text-generation/vllm/llama_index/llm.py
@@ -66,7 +66,8 @@ async def stream_generator():

return StreamingResponse(stream_generator(), media_type="text/event-stream")
else:
- response = await llm.acomplete(input.query).text
+ response = await llm.acomplete(input.query)
+ response = response.text
if logflag:
logger.info(response)
return GeneratedDoc(text=response, prompt=input.query)
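For context on this two-line fix: llm.acomplete(...) returns an awaitable, so the old one-liner read .text off the coroutine object itself and failed at runtime; awaiting first and then reading .text is the correct order. The non-streaming branch can be exercised against the running microservice roughly as follows, where the port, route, and field names are assumptions about the OPEA wrapper rather than values taken from this diff:

    # Hedged example: exercise the llm microservice's non-streaming branch.
    # Port 9000, the route, and the payload fields are illustrative assumptions.
    curl -s http://localhost:9000/v1/chat/completions \
      -H "Content-Type: application/json" \
      -d '{"query": "What is deep learning?", "streaming": false}'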
@@ -8,12 +8,11 @@ WORKPATH=$(dirname "$PWD")
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
- ## Build VLLM Ray docker
- cd $WORKPATH/comps/llms/text-generation/vllm/langchain/dependency
- docker build \
- -f Dockerfile.intel_hpu \
- --no-cache -t opea/vllm-hpu:comps \
- --shm-size=128g .
+ ## Build VLLM docker
+ cd $WORKPATH
+ git clone https://github.com/HabanaAI/vllm-fork.git
+ cd vllm-fork/
+ docker build --no-cache -f Dockerfile.hpu -t opea/vllm-hpu:comps --shm-size=128g .
if [ $? -ne 0 ]; then
echo "opea/vllm-hpu built fail"
exit 1
@@ -48,7 +47,7 @@ function start_service() {
--ipc=host \
-e HF_TOKEN=${HUGGINGFACEHUB_API_TOKEN} \
opea/vllm-hpu:comps \
- /bin/bash -c "export VLLM_CPU_KVCACHE_SPACE=40 && python3 -m vllm.entrypoints.openai.api_server --enforce-eager --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80 --block-size 128 --max-num-seqs 256 --max-seq_len-to-capture 2048"
+ --enforce-eager --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80 --block-size 128 --max-num-seqs 256 --max-seq_len-to-capture 2048

export vLLM_ENDPOINT="http://${ip_address}:${port_number}"
docker run -d --rm \
@@ -65,7 +64,7 @@ function start_service() {
until [[ "$n" -ge 120 ]] || [[ $ready == true ]]; do
docker logs test-comps-vllm-service > ${WORKPATH}/tests/test-comps-vllm-service.log
n=$((n+1))
- if grep -q Connected ${WORKPATH}/tests/test-comps-vllm-service.log; then
+ if grep -q throughput ${WORKPATH}/tests/test-comps-vllm-service.log; then
break
fi
sleep 5s
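The readiness probe now greps the service log for "throughput" instead of "Connected", presumably because the HabanaAI fork prints periodic throughput metrics once it is actually serving; the diff does not state the reason, so treat that as an assumption. The same check can be run by hand against the container started above:

    # Manual equivalent of the readiness loop: poll the vLLM container's logs
    # for "throughput", up to roughly ten minutes (120 tries x 5 s).
    for n in $(seq 1 120); do
      if docker logs test-comps-vllm-service 2>&1 | grep -q throughput; then
        echo "vllm service ready"
        break
      fi
      sleep 5
    done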
@@ -8,12 +8,11 @@ WORKPATH=$(dirname "$PWD")
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
- ## Build VLLM Ray docker
- cd $WORKPATH/comps/llms/text-generation/vllm/llama_index/dependency
- docker build \
- -f Dockerfile.intel_hpu \
- --no-cache -t opea/vllm-hpu:comps \
- --shm-size=128g .
+ ## Build VLLM docker
+ cd $WORKPATH
+ git clone https://github.com/HabanaAI/vllm-fork.git
+ cd vllm-fork/
+ docker build --no-cache -f Dockerfile.hpu -t opea/vllm-hpu:comps --shm-size=128g .
if [ $? -ne 0 ]; then
echo "opea/vllm-hpu built fail"
exit 1
@@ -48,7 +47,7 @@ function start_service() {
--ipc=host \
-e HF_TOKEN=${HUGGINGFACEHUB_API_TOKEN} \
opea/vllm-hpu:comps \
- /bin/bash -c "export VLLM_CPU_KVCACHE_SPACE=40 && python3 -m vllm.entrypoints.openai.api_server --enforce-eager --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80 --block-size 128 --max-num-seqs 256 --max-seq_len-to-capture 2048"
+ --enforce-eager --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80 --block-size 128 --max-num-seqs 256 --max-seq_len-to-capture 2048

export vLLM_ENDPOINT="http://${ip_address}:${port_number}"
docker run -d --rm \
@@ -65,7 +64,7 @@ function start_service() {
until [[ "$n" -ge 120 ]] || [[ $ready == true ]]; do
docker logs test-comps-vllm-service > ${WORKPATH}/tests/test-comps-vllm-service.log
n=$((n+1))
- if grep -q Connected ${WORKPATH}/tests/test-comps-vllm-service.log; then
+ if grep -q throughput ${WORKPATH}/tests/test-comps-vllm-service.log; then
break
fi
sleep 5s