From 308718e86ca4aaa6352ca881f0f062ced83ed744 Mon Sep 17 00:00:00 2001
From: Scott Davidson
Date: Wed, 27 Sep 2023 16:02:46 +0100
Subject: [PATCH] Fix gpuCount status update logic

---
 python/perftest/models/v1alpha1/pytorch.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/python/perftest/models/v1alpha1/pytorch.py b/python/perftest/models/v1alpha1/pytorch.py
index 9e01b53..007ca67 100644
--- a/python/perftest/models/v1alpha1/pytorch.py
+++ b/python/perftest/models/v1alpha1/pytorch.py
@@ -221,9 +221,11 @@ async def pod_modified(
         fetch_pod_log: t.Callable[[], t.Awaitable[str]]
     ):
         # Set default GPU count if none given in spec
-        gpu_count = pod.get("status", {}).get("gpuCount")
-        if gpu_count is None:
+        # (have to do this in status since spec is immutable)
+        if self.spec.gpu_count is None:
             self.status.gpu_count = (0 if self.spec.device == "cpu" else 1)
+        else:
+            self.status.gpu_count = self.spec.gpu_count
 
         pod_phase = pod.get("status", {}).get("phase", "Unknown")
         if pod_phase == "Running":