diff --git a/comps/finetuning/src/integrations/llm_on_ray/finetune/finetune.py b/comps/finetuning/src/integrations/llm_on_ray/finetune/finetune.py
index 3eabb90938..12101a581c 100644
--- a/comps/finetuning/src/integrations/llm_on_ray/finetune/finetune.py
+++ b/comps/finetuning/src/integrations/llm_on_ray/finetune/finetune.py
@@ -560,7 +560,7 @@ def main(external_config=None):
     }
 
     if config["General"]["gpt_base_model"] is True:
-        runtime_env["pip"] = ["transformers==4.26.0"]
+        runtime_env["pip"] = ["transformers>=4.50.0"]
 
     if device == "gpu":
         num_cpus = resources_per_worker["CPU"] * num_training_workers + 1  # additional 1 for head worker
diff --git a/comps/finetuning/src/integrations/xtune/prepare_xtune.sh b/comps/finetuning/src/integrations/xtune/prepare_xtune.sh
index 0c7e05cfc1..f35ca70d43 100644
--- a/comps/finetuning/src/integrations/xtune/prepare_xtune.sh
+++ b/comps/finetuning/src/integrations/xtune/prepare_xtune.sh
@@ -39,7 +39,7 @@ else
     pip install matplotlib
     pip install -e ".[metrics]"
     pip install --no-cache-dir --force-reinstall intel-extension-for-pytorch==2.6.10+xpu oneccl_bind_pt==2.6.0+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-    pip install "transformers<=4.49.0" optimum "auto_gptq>=0.5.0"
+    pip install "transformers>=4.50.0" optimum "auto_gptq>=0.5.0"
     echo "start llamafactory webui"
     if [ -z $GUI ]; then
         ZE_AFFINITY_MASK=0 llamafactory-cli webui &
diff --git a/comps/third_parties/llama-vision/src/requirements.txt b/comps/third_parties/llama-vision/src/requirements.txt
index ff802555ad..2f250f1569 100644
--- a/comps/third_parties/llama-vision/src/requirements.txt
+++ b/comps/third_parties/llama-vision/src/requirements.txt
@@ -11,5 +11,5 @@ prometheus-fastapi-instrumentator
 pydantic==2.9.2
 pydub
 shortuuid
-transformers==4.48.0
+transformers>=4.50.0
 uvicorn
diff --git a/comps/third_parties/llama-vision/src/requirements_tp.txt b/comps/third_parties/llama-vision/src/requirements_tp.txt
index c917d4b188..23c8308a6b 100644
--- a/comps/third_parties/llama-vision/src/requirements_tp.txt
+++ b/comps/third_parties/llama-vision/src/requirements_tp.txt
@@ -8,5 +8,5 @@ opentelemetry-sdk
 prometheus-fastapi-instrumentator
 pydantic==2.9.2
 shortuuid
-transformers==4.48.0
+transformers>=4.50.0
 uvicorn
diff --git a/comps/third_parties/sglang/src/Dockerfile b/comps/third_parties/sglang/src/Dockerfile
index eba839f761..4eb6280b7d 100644
--- a/comps/third_parties/sglang/src/Dockerfile
+++ b/comps/third_parties/sglang/src/Dockerfile
@@ -30,7 +30,7 @@ RUN curl -fsSL -v -o miniforge.sh -O https://github.com/conda-forge/miniforge/re
 RUN git clone https://github.com/jianan-gu/sglang -b llama4_optimzed_cpu_r1
 RUN . ~/miniforge3/bin/activate && conda create -n sglang python=3.10 && conda activate sglang && \
     cd sglang && pip install -e "python[all_cpu]" && cd .. && conda install -y libsqlite=3.48.0 && \
-    pip uninstall -y triton && pip uninstall -y transformers && pip install transformers==4.51.1 && \
+    pip uninstall -y triton && pip uninstall -y transformers && pip install "transformers>=4.50.0" && \
     pip install triton==3.1 && pip install intel-openmp==2024.2.0 && pip install transformers
 RUN git clone https://github.com/vllm-project/vllm.git -b v0.6.4.post1 && cd vllm && apt-get install -y libnuma-dev && \
     . ~/miniforge3/bin/activate && conda activate sglang && \