
Commit af6e19f

[Core][TPU] Support TPU Data Parallelism (vllm-project#27365)
Signed-off-by: wenxindongwork <[email protected]>
1 parent 99d69af commit af6e19f

1 file changed (+6 −1)

vllm/entrypoints/llm.py

Lines changed: 6 additions & 1 deletion
@@ -67,6 +67,7 @@
     RequestOutput,
     ScoringRequestOutput,
 )
+from vllm.platforms import current_platform
 from vllm.pooling_params import PoolingParams
 from vllm.sampling_params import BeamSearchParams, RequestOutputKind, SamplingParams
 from vllm.tasks import PoolingTask
@@ -289,7 +290,11 @@ def __init__(
         # warn about single-process data parallel usage.
         _dp_size = int(kwargs.get("data_parallel_size", 1))
         _distributed_executor_backend = kwargs.get("distributed_executor_backend")
-        if _dp_size > 1 and not _distributed_executor_backend == "external_launcher":
+        if (
+            _dp_size > 1
+            and not _distributed_executor_backend == "external_launcher"
+            and not current_platform.is_tpu()
+        ):
             raise ValueError(
                 f"LLM(data_parallel_size={_dp_size}) is not supported for single-"
                 "process usage and may hang. Please use "
