
Commit 19d8500 (1 parent: 174ef16)

test: Updated test skips that were marked with "inline::vllm" (#3979)

The provider type should be "remote::vllm". With this change, some log-probs tests are correctly skipped when run against remote vLLM (they fail if run).

Signed-off-by: Derek Higgins <[email protected]>

tests/integration/inference/test_openai_completion.py

Lines changed: 2 additions & 2 deletions
@@ -39,7 +39,7 @@ def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id)
     if provider.provider_type in (
         "inline::meta-reference",
         "inline::sentence-transformers",
-        "inline::vllm",
+        "remote::vllm",
         "remote::bedrock",
         "remote::databricks",
         # Technically Nvidia does support OpenAI completions, but none of their hosted models
@@ -120,7 +120,7 @@ def skip_if_model_doesnt_support_openai_chat_completion(client_with_models, model_id)
     if provider.provider_type in (
         "inline::meta-reference",
         "inline::sentence-transformers",
-        "inline::vllm",
+        "remote::vllm",
         "remote::bedrock",
         "remote::databricks",
         "remote::cerebras",
