Skip to content

Commit 368b44a

Browse files
committed
Properly check if auto-captioning model was previously loaded
1 parent 59b4192 commit 368b44a

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

taggui/widgets/auto_captioner.py

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -465,6 +465,10 @@ def load_processor_and_model(self, device: torch.device,
465465
# Only GPUs support 4-bit quantization.
466466
load_in_4_bit = (self.caption_settings['load_in_4_bit']
467467
and device.type == 'cuda')
468+
if self.models_directory_path:
469+
config_path = self.models_directory_path / model_id / 'config.json'
470+
if config_path.is_file():
471+
model_id = str(self.models_directory_path / model_id)
468472
if (model and self.parent().model_id == model_id
469473
and self.parent().model_device_type == device.type
470474
and self.parent().is_model_loaded_in_4_bit == load_in_4_bit):
@@ -480,10 +484,6 @@ def load_processor_and_model(self, device: torch.device,
480484
gc.collect()
481485
self.clear_console_text_edit_requested.emit()
482486
print(f'Loading {model_id}...')
483-
if self.models_directory_path:
484-
config_path = self.models_directory_path / model_id / 'config.json'
485-
if config_path.is_file():
486-
model_id = str(self.models_directory_path / model_id)
487487
if model_type == ModelType.COGVLM:
488488
processor = LlamaTokenizer.from_pretrained('lmsys/vicuna-7b-v1.5')
489489
else:

0 commit comments

Comments
 (0)