
Commit

[Bug] Fix pickling of ModelConfig when RunAI Model Streamer is used (vllm-project#11825)

Signed-off-by: DarkLight1337 <[email protected]>
DarkLight1337 authored Jan 8, 2025
1 parent 259abd8 commit ef68eb2
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions vllm/config.py
@@ -381,16 +381,16 @@ def maybe_pull_model_tokenizer_for_s3(self, model: str,
         """
         if is_s3(model) or is_s3(tokenizer):
             if is_s3(model):
-                self.s3_model = S3Model()
-                self.s3_model.pull_files(model, allow_pattern=["*config.json"])
+                s3_model = S3Model()
+                s3_model.pull_files(model, allow_pattern=["*config.json"])
                 self.model_weights = self.model
-                self.model = self.s3_model.dir
+                self.model = s3_model.dir
 
             if is_s3(tokenizer):
-                self.s3_tokenizer = S3Model()
-                self.s3_tokenizer.pull_files(
+                s3_tokenizer = S3Model()
+                s3_tokenizer.pull_files(
                     model, ignore_pattern=["*.pt", "*.safetensors", "*.bin"])
-                self.tokenizer = self.s3_tokenizer.dir
+                self.tokenizer = s3_tokenizer.dir
 
     def _init_multimodal_config(
             self, limit_mm_per_prompt: Optional[Mapping[str, int]]
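
Why this fixes pickling: ModelConfig must be picklable (vLLM passes configs to worker processes), and storing the S3Model helper on `self` meant pickle had to serialize whatever live state the helper carries. With the helper demoted to a local variable, only plain strings (`self.model`, `self.tokenizer`, `self.model_weights`) remain on the config after the files are pulled. Below is a minimal sketch of the pattern, not vLLM code: `FakeS3Model` and `Config` are hypothetical stand-ins, and the non-picklable state is modeled with a `threading.Lock` purely for illustration.

```python
import pickle
import threading


class FakeS3Model:
    """Hypothetical stand-in for S3Model; the lock models non-picklable state."""

    def __init__(self):
        self._client_lock = threading.Lock()  # pickle rejects _thread.lock objects
        self.dir = "/tmp/model-cache"


class Config:
    """Hypothetical stand-in for ModelConfig."""

    def __init__(self, keep_helper: bool):
        helper = FakeS3Model()
        if keep_helper:
            self.s3_model = helper  # pre-fix pattern: helper kept as an attribute
        self.model = helper.dir     # post-fix pattern: only a plain string survives


pickle.dumps(Config(keep_helper=False))  # succeeds: instance holds only strings

try:
    pickle.dumps(Config(keep_helper=True))
except TypeError as exc:
    print(f"pickling fails when the helper is kept: {exc}")
```

Running this, the `keep_helper=True` case raises `TypeError: cannot pickle '_thread.lock' object`, the same class of failure this commit avoids by never attaching the helper to the config.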
