Commit 098e672 (parent 0b08e5d)

Fix xtuner/llava-llama-3-8b-v1_1-transformers by specifying patch size
1 file changed: taggui/auto_captioning/models/llava_llama_3.py (8 additions, 0 deletions)
@@ -1,7 +1,15 @@
+from transformers import AutoConfig, AutoProcessor
+
 from auto_captioning.auto_captioning_model import AutoCaptioningModel


 class LlavaLlama3(AutoCaptioningModel):
+    def get_processor(self):
+        config = AutoConfig.from_pretrained(self.model_id)
+        patch_size = config.vision_config.patch_size
+        return AutoProcessor.from_pretrained(
+            self.model_id, trust_remote_code=True, patch_size=patch_size)
+
     @staticmethod
     def get_default_prompt() -> str:
         return 'Describe the image in one sentence.'
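For reference, a minimal standalone sketch of the same fix outside the TagGUI class (the model id is the one named in the commit message; the assumption, per the commit's rationale, is that this checkpoint's processor config does not supply the vision patch size that recent transformers releases expect):

from transformers import AutoConfig, AutoProcessor

model_id = 'xtuner/llava-llama-3-8b-v1_1-transformers'

# Read the ViT patch size from the checkpoint's vision config.
config = AutoConfig.from_pretrained(model_id)
patch_size = config.vision_config.patch_size

# Pass patch_size explicitly when building the processor, mirroring
# the change made in get_processor() above.
processor = AutoProcessor.from_pretrained(
    model_id, trust_remote_code=True, patch_size=patch_size)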
