Commit b22008b

Add support to ImageTextToTextPipeline and save/load support in Processors
1 parent 0f64767 commit b22008b

2 files changed: +11 −2 lines changed


src/transformers/processing_utils.py

Lines changed: 1 addition & 1 deletion
@@ -488,7 +488,7 @@ class ProcessorMixin(PushToHubMixin):
     """
 
     attributes = ["feature_extractor", "tokenizer"]
-    optional_attributes = ["chat_template", "audio_tokenizer"]
+    optional_attributes = ["chat_template", "audio_tokenizer", "response_schema"]
     optional_call_args: list[str] = []
     # Names need to be attr_class for attr in attributes
     feature_extractor_class = None
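
For context on what this one-line change does: registering "response_schema" as an optional attribute of ProcessorMixin is what lets a processor carry the schema through a save/load round trip, exactly as exercised by the new processor test below. A minimal usage sketch, assuming the same tiny test checkpoint as the test; the placeholder schema dict and output directory are illustrative only:

from transformers import AutoProcessor

# Same tiny checkpoint used in the new test below; any processor checkpoint should behave the same way.
processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration")

# "response_schema" is now a registered optional attribute, so it can be set on the processor...
processor.response_schema = {"type": "object"}  # illustrative placeholder; the test uses ernie_schema

# ...and it survives a save/load round trip along with the rest of the processor state.
processor.save_pretrained("./my-processor")
reloaded = AutoProcessor.from_pretrained("./my-processor")
assert reloaded.response_schema == {"type": "object"}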

tests/utils/test_chat_schema_utils.py

Lines changed: 10 additions & 1 deletion
@@ -15,7 +15,7 @@
 import tempfile
 import unittest
 
-from transformers import AutoTokenizer
+from transformers import AutoProcessor, AutoTokenizer
 from transformers.testing_utils import require_jmespath
 from transformers.utils.chat_parsing_utils import recursive_parse
 
@@ -150,13 +150,22 @@
 @require_jmespath
 class ChatSchemaParserTest(unittest.TestCase):
     def test_schema_save_load(self):
+        # Has no schema by default
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         tokenizer.response_schema = ernie_schema
         with tempfile.TemporaryDirectory() as tmpdir:
             tokenizer.save_pretrained(tmpdir)
             reloaded_tokenizer = AutoTokenizer.from_pretrained(tmpdir)
             self.assertEqual(reloaded_tokenizer.response_schema, ernie_schema)
 
+        # Has no schema by default
+        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration")
+        processor.response_schema = ernie_schema
+        with tempfile.TemporaryDirectory() as tmpdir:
+            processor.save_pretrained(tmpdir)
+            reloaded_processor = AutoProcessor.from_pretrained(tmpdir)
+            self.assertEqual(reloaded_processor.response_schema, ernie_schema)
+
     def test_tokenizer_method(self):
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         model_out = '<|START_THINKING|>I should call a tool.<|END_THINKING|><|START_ACTION|>[\n {"tool_call_id": "0", "tool_name": "simple_tool", "parameters": {"temperature_format": "Celsius"}}\n]<|END_ACTION|><|END_OF_TURN_TOKEN|>'
