Skip to content

Commit 3f71757

Browse files
committed
Fix for transformers pin bump
1 parent ffd88b4 commit 3f71757

File tree

6 files changed

+16
-16
lines changed

6 files changed

+16
-16
lines changed

.ci/scripts/test_model_e2e.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ case "$HF_MODEL" in
8686
MODEL_NAME="voxtral"
8787
RUNNER_TARGET="voxtral_runner"
8888
RUNNER_PATH="voxtral"
89-
EXPECTED_OUTPUT="poem"
89+
EXPECTED_OUTPUT="existence"
9090
PREPROCESSOR="voxtral_preprocessor.pte"
9191
TOKENIZER_URL="https://huggingface.co/mistralai/Voxtral-Mini-3B-2507/resolve/main" # @lint-ignore
9292
TOKENIZER_FILE="tekken.json"

.github/workflows/trunk.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -839,7 +839,7 @@ jobs:
839839
qwen3-0.6b|xnnpack|--quantize,
840840
qwen3-1.7b|xnnpack|--quantize,
841841
gemma3-1b|xnnpack|--quantize,
842-
phi4-mini|xnnpack|--quantize,
842+
# phi4-mini|xnnpack|--quantize, transformers v5.0.0rc0 introduces a data-dependent branching in transformers/modeling_rope_utils.py:61
843843
smollm2-135m|xnnpack|--quantize,
844844
smollm3-3b|xnnpack|--quantize
845845
]

backends/qualcomm/quantizer/annotators.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -955,6 +955,10 @@ def annotate_elu(node: Node, quantization_config: QuantizationConfig) -> None:
955955
def annotate_embedding(node: Node, quantization_config: QuantizationConfig) -> None:
956956
weight = node.args[0]
957957

958+
# Only quantize if input is a float tensor
959+
if not _is_float_tensor(weight):
960+
return
961+
958962
input_qspec_map = {}
959963
input_qspec_map[weight] = quantization_config.input_activation
960964

examples/qualcomm/oss_scripts/mobilevit_v1.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
)
2525
from PIL import Image
2626
from torchvision import datasets
27-
from transformers import AutoModelForImageClassification, MobileViTFeatureExtractor
27+
from transformers import AutoImageProcessor, AutoModelForImageClassification
2828

2929

3030
def get_imagenet_dataset(dataset_path, data_size, shuffle=True):
@@ -39,15 +39,13 @@ def get_data_loader():
3939
# prepare input data
4040
inputs, targets = [], []
4141
data_loader = get_data_loader()
42-
feature_extractor = MobileViTFeatureExtractor.from_pretrained(
43-
"apple/mobilevit-xx-small"
44-
)
42+
image_processor = AutoImageProcessor.from_pretrained("apple/mobilevit-xx-small")
4543
for index, data in enumerate(data_loader.dataset.imgs):
4644
if index >= data_size:
4745
break
4846
data_path, target = data
4947
image = Image.open(data_path).convert("RGB")
50-
feature = feature_extractor(images=image, return_tensors="pt")
48+
feature = image_processor(images=image, return_tensors="pt")
5149
inputs.append((feature["pixel_values"],))
5250
targets.append(torch.tensor(target))
5351

examples/qualcomm/oss_scripts/mobilevit_v2.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
)
2626
from PIL import Image
2727
from torchvision import datasets
28-
from transformers import AutoModelForImageClassification, MobileViTFeatureExtractor
28+
from transformers import AutoImageProcessor, AutoModelForImageClassification
2929

3030

3131
def get_imagenet_dataset(dataset_path, data_size, shuffle=True):
@@ -40,15 +40,13 @@ def get_data_loader():
4040
# prepare input data
4141
inputs, targets = [], []
4242
data_loader = get_data_loader()
43-
feature_extractor = MobileViTFeatureExtractor.from_pretrained(
44-
"apple/mobilevit-xx-small"
45-
)
43+
image_processor = AutoImageProcessor.from_pretrained("apple/mobilevit-xx-small")
4644
for index, data in enumerate(data_loader.dataset.imgs):
4745
if index >= data_size:
4846
break
4947
data_path, target = data
5048
image = Image.open(data_path).convert("RGB")
51-
feature = feature_extractor(images=image, return_tensors="pt")
49+
feature = image_processor(images=image, return_tensors="pt")
5250
inputs.append((feature["pixel_values"],))
5351
targets.append(torch.tensor(target))
5452

examples/qualcomm/scripts/mobilebert_fine_tune.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -131,17 +131,17 @@ def get_fine_tuned_mobilebert(artifacts_dir, pretrained_weight, batch_size):
131131
)
132132

133133
# tokenize dataset
134-
encoded_data_train = tokenizer.batch_encode_plus(
135-
data[data.data_type == "train"].Title.values,
134+
encoded_data_train = tokenizer(
135+
data[data.data_type == "train"].Title.values.tolist(),
136136
add_special_tokens=True,
137137
return_attention_mask=True,
138138
max_length=256,
139139
padding="max_length",
140140
truncation=True,
141141
return_tensors="pt",
142142
)
143-
encoded_data_val = tokenizer.batch_encode_plus(
144-
data[data.data_type == "val"].Title.values,
143+
encoded_data_val = tokenizer(
144+
data[data.data_type == "val"].Title.values.tolist(),
145145
add_special_tokens=True,
146146
return_attention_mask=True,
147147
max_length=256,

0 commit comments

Comments
 (0)