Skip to content

Commit

Permalink
Tweak
Browse files Browse the repository at this point in the history
  • Loading branch information
Yard1 committed Jun 12, 2024
1 parent 3c7690e commit 2684305
Showing 1 changed file with 6 additions and 14 deletions.
20 changes: 6 additions & 14 deletions vllm/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -695,8 +695,11 @@ def inner(*args, **kwargs):
return wrapper


@lru_cache(maxsize=None)
def _get_num_gpus_available_isolated() -> int:
@lru_cache(maxsize=5)
def _get_num_gpus_available_isolated(
cuda_visible_devices: Optional[str] = None) -> int:
# Note: cuda_visible_devices is not used, but we keep it as an argument for
# LRU Cache purposes.
try:
out = subprocess.run(
[
Expand All @@ -714,22 +717,11 @@ def _get_num_gpus_available_isolated() -> int:
return int(out.stdout.strip())


def get_num_gpus_available_isolated() -> int:
    """Get the number of GPUs without initializing the CUDA context
    in the current process.

    This should be used instead of torch.cuda.device_count()
    unless CUDA_VISIBLE_DEVICES has already been set to the desired
    value.
    """
    # Pass the current CUDA_VISIBLE_DEVICES value as the cache key of the
    # lru_cached helper: a changed value is a cache miss, so the GPU count
    # is re-queried whenever the environment variable changes. This replaces
    # the previous module-global (_LAST_CUDA_VISIBLE_DEVICES) bookkeeping
    # that manually called cache_clear().
    return _get_num_gpus_available_isolated(envs.CUDA_VISIBLE_DEVICES)

0 comments on commit 2684305

Please sign in to comment.