Skip to content

Commit 879a065

Browse files
DarkLight1337, Isotr0py, and hmellor
authored
[CI/Build] Bump transformers version (vllm-project#27528)
Signed-off-by: DarkLight1337 <[email protected]> Signed-off-by: Isotr0py <[email protected]> Signed-off-by: Harry Mellor <[email protected]> Co-authored-by: Isotr0py <[email protected]> Co-authored-by: Harry Mellor <[email protected]>
1 parent 29de3cd commit 879a065

File tree

9 files changed

+17
-17
lines changed

9 files changed

+17
-17
lines changed

requirements/common.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ requests >= 2.26.0
77
tqdm
88
blake3
99
py-cpuinfo
10-
transformers >= 4.56.0
10+
transformers >= 4.56.0, < 5
1111
tokenizers >= 0.21.1 # Required for fast incremental detokenization.
1212
protobuf # Required by LlamaTokenizer.
1313
fastapi[standard] >= 0.115.0 # Required by FastAPI's form models in the OpenAI API server's audio transcriptions endpoint.

requirements/nightly_torch_test.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ opencv-python-headless >= 4.11.0 # required for video test
2929
datamodel_code_generator # required for minicpm3 test
3030
lm-eval[api] @ git+https://github.com/EleutherAI/lm-evaluation-harness.git@206b7722158f58c35b7ffcd53b035fdbdda5126d # required for model evaluation test
3131
mteb>=1.38.11, <2 # required for mteb test
32-
transformers==4.56.2
32+
transformers==4.57.1
3333
tokenizers==0.22.0
3434
schemathesis>=3.39.15 # Required for openai schema test.
3535
# quantization

requirements/test.in

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ datamodel_code_generator # required for minicpm3 test
3737
# TODO: Use lm-eval[api]==0.4.10 once released
3838
lm-eval[api] @ git+https://github.com/EleutherAI/lm-evaluation-harness.git@206b7722158f58c35b7ffcd53b035fdbdda5126d # required for model evaluation test
3939
mteb[bm25s]>=1.38.11, <2 # required for mteb test
40-
transformers==4.56.2
40+
transformers==4.57.1
4141
tokenizers==0.22.0
4242
schemathesis>=3.39.15 # Required for openai schema test.
4343
# quantization

requirements/test.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1196,7 +1196,7 @@ tqdm==4.66.6
11961196
# transformers
11971197
tqdm-multiprocess==0.0.11
11981198
# via lm-eval
1199-
transformers==4.56.2
1199+
transformers==4.57.1
12001200
# via
12011201
# -r requirements/test.in
12021202
# genai-perf

tests/models/multimodal/generation/test_maverick.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -186,6 +186,8 @@ def create_reduced_config(
186186
if "text_config" in config_dict:
187187
original_text_layers = config_dict["text_config"]["num_hidden_layers"]
188188
config_dict["text_config"]["num_hidden_layers"] = text_layers
189+
original_layer_types = config_dict["text_config"]["layer_types"]
190+
config_dict["text_config"]["layer_types"] = original_layer_types[:text_layers]
189191
print(f"Reduced text layers from {original_text_layers} to {text_layers}")
190192

191193
original_num_experts = config_dict["text_config"]["num_local_experts"]

tests/models/registry.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -882,27 +882,27 @@ def check_available_online(
882882

883883
_TRANSFORMERS_BACKEND_MODELS = {
884884
"TransformersEmbeddingModel": _HfExamplesInfo(
885-
"BAAI/bge-base-en-v1.5", min_transformers_version="4.57.0.dev0"
885+
"BAAI/bge-base-en-v1.5", min_transformers_version="5.0.0"
886886
),
887887
"TransformersForSequenceClassification": _HfExamplesInfo(
888888
"papluca/xlm-roberta-base-language-detection",
889-
min_transformers_version="4.57.0.dev0",
889+
min_transformers_version="5.0.0",
890890
),
891891
"TransformersForCausalLM": _HfExamplesInfo(
892892
"hmellor/Ilama-3.2-1B", trust_remote_code=True
893893
),
894894
"TransformersMultiModalForCausalLM": _HfExamplesInfo("BAAI/Emu3-Chat-hf"),
895895
"TransformersMoEForCausalLM": _HfExamplesInfo(
896-
"allenai/OLMoE-1B-7B-0924", min_transformers_version="4.57.0.dev0"
896+
"allenai/OLMoE-1B-7B-0924", min_transformers_version="5.0.0"
897897
),
898898
"TransformersMultiModalMoEForCausalLM": _HfExamplesInfo(
899-
"Qwen/Qwen3-VL-30B-A3B-Instruct", min_transformers_version="4.57.0.dev0"
899+
"Qwen/Qwen3-VL-30B-A3B-Instruct", min_transformers_version="5.0.0"
900900
),
901901
"TransformersMoEEmbeddingModel": _HfExamplesInfo(
902-
"Qwen/Qwen3-30B-A3B", min_transformers_version="4.57.0.dev0"
902+
"Qwen/Qwen3-30B-A3B", min_transformers_version="5.0.0"
903903
),
904904
"TransformersMoEForSequenceClassification": _HfExamplesInfo(
905-
"Qwen/Qwen3-30B-A3B", min_transformers_version="4.57.0.dev0"
905+
"Qwen/Qwen3-30B-A3B", min_transformers_version="5.0.0"
906906
),
907907
"TransformersMultiModalEmbeddingModel": _HfExamplesInfo("google/gemma-3-4b-it"),
908908
"TransformersMultiModalForSequenceClassification": _HfExamplesInfo(

tests/models/test_transformers.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ def test_models(
8282
from packaging.version import Version
8383

8484
installed = Version(transformers.__version__)
85-
required = Version("4.57.0.dev0")
85+
required = Version("5.0.0")
8686
if model == "allenai/OLMoE-1B-7B-0924" and installed < required:
8787
pytest.skip(
8888
"MoE models with the Transformers backend require "

vllm/model_executor/models/moonvit.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@
4949
import torch
5050
import torch.nn as nn
5151
import torch.nn.functional as F
52-
from transformers.activations import ACT2FN, PytorchGELUTanh
52+
from transformers.activations import ACT2FN
5353
from transformers.modeling_utils import PreTrainedModel
5454
from transformers.utils import is_flash_attn_2_available
5555

@@ -651,7 +651,7 @@ def __init__(
651651
"num_heads": config.num_attention_heads,
652652
"hidden_dim": config.hidden_size,
653653
"mlp_dim": config.intermediate_size,
654-
"activation": PytorchGELUTanh(),
654+
"activation": ACT2FN["gelu_pytorch_tanh"],
655655
"attn_bias": True,
656656
"attn_implementation": config._attn_implementation,
657657
},

vllm/model_executor/models/qwen2_vl.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@
3434
import torch.nn as nn
3535
import torch.nn.functional as F
3636
from einops import rearrange, repeat
37-
from transformers import AutoConfig, BatchFeature, PretrainedConfig
37+
from transformers import BatchFeature, PretrainedConfig
3838
from transformers.models.qwen2_vl import Qwen2VLImageProcessor, Qwen2VLProcessor
3939
from transformers.models.qwen2_vl.configuration_qwen2_vl import (
4040
Qwen2VLConfig,
@@ -1651,9 +1651,7 @@ def __init__(
16511651
class Tarsier2ProcessingInfo(Qwen2VLProcessingInfo):
16521652
def get_hf_config(self) -> Qwen2VLConfig:
16531653
model_path = self.ctx.model_config.model
1654-
original_config = AutoConfig.from_pretrained(model_path)
1655-
config_dict = original_config.to_dict()
1656-
correct_config = Qwen2VLConfig.from_dict(config_dict)
1654+
correct_config = Qwen2VLConfig.from_pretrained(model_path)
16571655

16581656
return correct_config
16591657

0 commit comments

Comments (0)