From 7fcab0e45bc74429fcd7f42a1548fd6950e8d932 Mon Sep 17 00:00:00 2001
From: Yunnglin
Date: Tue, 26 Aug 2025 17:29:32 +0800
Subject: [PATCH] add trust remote

---
 evalscope/api/model/model.py                                  | 2 +-
 evalscope/backend/rag_eval/utils/clip.py                      | 4 ++--
 .../benchmarks/needle_haystack/needle_haystack_adapter.py     | 2 +-
 .../t2v_metrics/models/clipscore_models/pickscore_model.py    | 5 +++--
 .../t2v_metrics/models/itmscore_models/fga_blip2_model.py     | 4 +++-
 .../models/itmscore_models/image_reward/blip_pretrain.py      | 2 +-
 .../models/vqascore_models/lavis/models/blip_models/blip.py   | 2 +-
 .../metrics/t2v_metrics/models/vqascore_models/mm_utils.py    | 2 +-
 evalscope/perf/plugin/api/custom_api.py                       | 2 +-
 evalscope/perf/plugin/api/openai_api.py                       | 2 +-
 evalscope/third_party/thinkbench/eval.py                      | 2 +-
 11 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/evalscope/api/model/model.py b/evalscope/api/model/model.py
index 5b2b84a2..51d1a5e2 100644
--- a/evalscope/api/model/model.py
+++ b/evalscope/api/model/model.py
@@ -362,7 +362,7 @@ def get_model(
 
     logger.info(
         f'Creating model {model} with eval_type={eval_type} '
-        f'base_url={base_url}, api_key={api_key}, config={config}, model_args={model_args}'
+        f'base_url={base_url}, config={config}, model_args={model_args}'
     )
 
     # find a matching model type
diff --git a/evalscope/backend/rag_eval/utils/clip.py b/evalscope/backend/rag_eval/utils/clip.py
index 62364577..1005bc4c 100644
--- a/evalscope/backend/rag_eval/utils/clip.py
+++ b/evalscope/backend/rag_eval/utils/clip.py
@@ -81,8 +81,8 @@ def __init__(
             model_name = download_model(self.model_name, self.revision)
 
         # Load the model and processor
-        self.model = AutoModel.from_pretrained(model_name).to(self.device)
-        self.processor = AutoProcessor.from_pretrained(model_name)
+        self.model = AutoModel.from_pretrained(model_name, trust_remote_code=True).to(self.device)
+        self.processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
 
         self.transform = self.processor.image_processor
         self.tokenizer = self.processor.tokenizer
diff --git a/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py b/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py
index 05207669..d9612d04 100644
--- a/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py
+++ b/evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py
@@ -118,7 +118,7 @@ def _init_length(self):
     def _init_tokenizer(self):
         """ Initialize the tokenizer based on the provided tokenizer path."""
         from modelscope import AutoTokenizer
-        self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_path)
+        self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_path, trust_remote_code=True)
 
     def load(self):
         """Load dataset from local disk or remote."""
diff --git a/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py b/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py
index ffaf7e48..31b2bc01 100644
--- a/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py
+++ b/evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py
@@ -24,8 +24,9 @@ def load_model(self):
         # model_pretrained_name_or_path = "yuvalkirstain/PickScore_v1"
         model_pretrained_name_or_path = 'AI-ModelScope/PickScore_v1'  # modelscope model
 
-        self.processor = AutoProcessor.from_pretrained(processor_name_or_path)
-        self.model = AutoModel.from_pretrained(model_pretrained_name_or_path).eval().to(self.device)
+        self.processor = AutoProcessor.from_pretrained(processor_name_or_path, trust_remote_code=True)
+        self.model = AutoModel.from_pretrained(model_pretrained_name_or_path,
+                                               trust_remote_code=True).eval().to(self.device)
 
     def load_images(self, image: List[str]) -> torch.Tensor:
         """Load the image(s), and return a tensor (no preprocessing!!) put on self.device
diff --git a/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py b/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py
index 58fcae5f..97985724 100644
--- a/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py
+++ b/evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py
@@ -37,7 +37,9 @@ def load_model(self):
        from ..utils import download_file
 
         # load tokenizer
-        self.tokenizer = AutoTokenizer.from_pretrained('AI-ModelScope/bert-base-uncased', truncation_side='right')
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            'AI-ModelScope/bert-base-uncased', truncation_side='right', trust_remote_code=True
+        )
         self.tokenizer.add_special_tokens({'bos_token': '[DEC]'})
         # load model
         self.variant = FGA_BLIP2_MODELS[self.model_name]['variant']
diff --git a/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py b/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py
index afd66c7b..19610d7a 100644
--- a/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py
+++ b/evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py
@@ -10,7 +10,7 @@
 
 
 def init_tokenizer():
-    tokenizer = AutoTokenizer.from_pretrained('AI-ModelScope/bert-base-uncased')
+    tokenizer = AutoTokenizer.from_pretrained('AI-ModelScope/bert-base-uncased', trust_remote_code=True)
     tokenizer.add_special_tokens({'bos_token': '[DEC]'})
     tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
     tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
diff --git a/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py b/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py
index b31d98ef..79b89394 100755
--- a/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py
+++ b/evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py
@@ -20,7 +20,7 @@ class BlipBase(BaseModel):
 
     @classmethod
     def init_tokenizer(cls):
-        tokenizer = AutoTokenizer.from_pretrained('AI-ModelScope/bert-base-uncased')
+        tokenizer = AutoTokenizer.from_pretrained('AI-ModelScope/bert-base-uncased', trust_remote_code=True)
         tokenizer.add_special_tokens({'bos_token': '[DEC]'})
         tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
         tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
diff --git a/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py b/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py
index ef8f275d..edd05597 100644
--- a/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py
+++ b/evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py
@@ -81,7 +81,7 @@ def load_pretrained_model(
 
     from ..utils import download_file
 
-    tokenizer = AutoTokenizer.from_pretrained(tokenizer_path, **tokenizer_dict)
+    tokenizer = AutoTokenizer.from_pretrained(tokenizer_path, trust_remote_code=True, **tokenizer_dict)
     # tokenizer.pad_token = tokenizer.unk_token  # could be redundant
 
     model_path = download_file(model_path, cache_dir=cache_dir)
diff --git a/evalscope/perf/plugin/api/custom_api.py b/evalscope/perf/plugin/api/custom_api.py
index 7064d7ca..2e080b68 100644
--- a/evalscope/perf/plugin/api/custom_api.py
+++ b/evalscope/perf/plugin/api/custom_api.py
@@ -31,7 +31,7 @@ def __init__(self, param: Arguments):
         super().__init__(param=param)
         if param.tokenizer_path is not None:
             from modelscope import AutoTokenizer
-            self.tokenizer = AutoTokenizer.from_pretrained(param.tokenizer_path)
+            self.tokenizer = AutoTokenizer.from_pretrained(param.tokenizer_path, trust_remote_code=True)
         else:
             self.tokenizer = None
 
diff --git a/evalscope/perf/plugin/api/openai_api.py b/evalscope/perf/plugin/api/openai_api.py
index 766e66b6..0d4f212f 100644
--- a/evalscope/perf/plugin/api/openai_api.py
+++ b/evalscope/perf/plugin/api/openai_api.py
@@ -25,7 +25,7 @@ def __init__(self, param: Arguments):
         super().__init__(param=param)
         if param.tokenizer_path is not None:
             from modelscope import AutoTokenizer
-            self.tokenizer = AutoTokenizer.from_pretrained(param.tokenizer_path)
+            self.tokenizer = AutoTokenizer.from_pretrained(param.tokenizer_path, trust_remote_code=True)
         else:
             self.tokenizer = None
 
diff --git a/evalscope/third_party/thinkbench/eval.py b/evalscope/third_party/thinkbench/eval.py
index 067b4c96..2e6394a1 100644
--- a/evalscope/third_party/thinkbench/eval.py
+++ b/evalscope/third_party/thinkbench/eval.py
@@ -24,7 +24,7 @@ def __init__(self, report_path, tokenizer_path, model_name, dataset_name, subset
         self.switch_tokens = ['alternatively', 'but wait', 'let me reconsider', 'another way', 'another approach', 'another method', 'another angle']
         self.subset_dict = defaultdict(lambda: defaultdict(list))
         self.think_end_token = '</think>'
-        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
+        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path, trust_remote_code=True)
         self.model_name = model_name
         self.dataset_name = dataset_name
         self.subsets = subsets
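Note (editorial, not part of the patch): the sketch below illustrates the effect of the flag these hunks add, assuming a checkpoint repository that bundles custom modeling or tokenization code; the repo id 'some-org/custom-code-model' is a placeholder, not a model referenced by this change.

# Minimal sketch, assuming a ModelScope/Hugging Face repo that ships its own Python code.
from modelscope import AutoTokenizer

# Without trust_remote_code=True, recent transformers/modelscope versions refuse to
# execute the code bundled with such repositories and raise an error (older versions
# prompt interactively); passing the flag opts in explicitly, as the patch does for
# every from_pretrained call above.
tokenizer = AutoTokenizer.from_pretrained('some-org/custom-code-model', trust_remote_code=True)
print(tokenizer('hello world')['input_ids'])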