From 16fd7b42817acf491023cbded31c56a2d3a37894 Mon Sep 17 00:00:00 2001
From: Lucas Fernandez
Date: Thu, 16 Nov 2023 13:05:53 +0100
Subject: [PATCH] Fix performance issues in model serving global context

---
 frontend/src/api/k8s/inferenceServices.ts    | 26 -------------------
 frontend/src/api/k8s/servingRuntimes.ts      | 21 ---------------
 .../modelServing/useInferenceServices.ts     |  4 +--
 .../pages/modelServing/useServingRuntimes.ts |  4 +--
 4 files changed, 4 insertions(+), 51 deletions(-)

diff --git a/frontend/src/api/k8s/inferenceServices.ts b/frontend/src/api/k8s/inferenceServices.ts
index d527973f44..66b59a20c0 100644
--- a/frontend/src/api/k8s/inferenceServices.ts
+++ b/frontend/src/api/k8s/inferenceServices.ts
@@ -11,7 +11,6 @@ import { InferenceServiceKind, K8sAPIOptions, K8sStatus, KnownLabels } from '~/k
 import { CreatingInferenceServiceObject } from '~/pages/modelServing/screens/types';
 import { translateDisplayNameForK8s } from '~/pages/projects/utils';
 import { applyK8sAPIOptions } from '~/api/apiMergeUtils';
-import { getModelServingProjects } from './projects';
 
 export const assembleInferenceService = (
   data: CreatingInferenceServiceObject,
@@ -77,31 +76,6 @@ export const listInferenceService = (
   }).then((listResource) => listResource.items);
 };
 
-export const listScopedInferenceService = (
-  labelSelector?: string,
-): Promise<InferenceServiceKind[]> =>
-  getModelServingProjects().then((projects) =>
-    Promise.all(
-      projects.map((project) => listInferenceService(project.metadata.name, labelSelector)),
-    ).then((listInferenceService) =>
-      _.flatten(
-        listInferenceService.map((projectInferenceServices) =>
-          _.uniqBy(projectInferenceServices, (is) => is.metadata.name),
-        ),
-      ),
-    ),
-  );
-
-export const getInferenceServiceContext = (
-  namespace?: string,
-  labelSelector?: string,
-): Promise<InferenceServiceKind[]> => {
-  if (namespace) {
-    return listInferenceService(namespace, labelSelector);
-  }
-  return listScopedInferenceService(labelSelector);
-};
-
 export const getInferenceService = (
   name: string,
   namespace: string,
diff --git a/frontend/src/api/k8s/servingRuntimes.ts b/frontend/src/api/k8s/servingRuntimes.ts
index 899109086b..9361b69d2b 100644
--- a/frontend/src/api/k8s/servingRuntimes.ts
+++ b/frontend/src/api/k8s/servingRuntimes.ts
@@ -1,4 +1,3 @@
-import * as _ from 'lodash';
 import {
   k8sCreateResource,
   k8sDeleteResource,
@@ -19,7 +18,6 @@ import { getModelServingRuntimeName } from '~/pages/modelServing/utils';
 import { getDisplayNameFromK8sResource, translateDisplayNameForK8s } from '~/pages/projects/utils';
 import { applyK8sAPIOptions } from '~/api/apiMergeUtils';
 import { AcceleratorState } from '~/utilities/useAcceleratorState';
-import { getModelServingProjects } from './projects';
 import { assemblePodSpecOptions, getshmVolume, getshmVolumeMount } from './utils';
 
 export const assembleServingRuntime = (
@@ -148,25 +146,6 @@ export const listServingRuntimes = (
   }).then((listResource) => listResource.items);
 };
 
-export const listScopedServingRuntimes = (labelSelector?: string): Promise<ServingRuntimeKind[]> =>
-  getModelServingProjects().then((projects) =>
-    Promise.all(
-      projects.map((project) => listServingRuntimes(project.metadata.name, labelSelector)),
-    ).then((listServingRuntimes) =>
-      _.uniqBy(_.flatten(listServingRuntimes), (sr) => sr.metadata.name),
-    ),
-  );
-
-export const getServingRuntimeContext = (
-  namespace?: string,
-  labelSelector?: string,
-): Promise<ServingRuntimeKind[]> => {
-  if (namespace) {
-    return listServingRuntimes(namespace, labelSelector);
-  }
-  return listScopedServingRuntimes(labelSelector);
-};
-
 export const getServingRuntime = (name: string, namespace: string): Promise<ServingRuntimeKind> =>
   k8sGetResource<ServingRuntimeKind>({
     model: ServingRuntimeModel,
diff --git a/frontend/src/pages/modelServing/useInferenceServices.ts b/frontend/src/pages/modelServing/useInferenceServices.ts
index 31b1b348cb..c01588a9b9 100644
--- a/frontend/src/pages/modelServing/useInferenceServices.ts
+++ b/frontend/src/pages/modelServing/useInferenceServices.ts
@@ -1,5 +1,5 @@
 import * as React from 'react';
-import { getInferenceServiceContext } from '~/api';
+import { listInferenceService } from '~/api';
 import { InferenceServiceKind } from '~/k8sTypes';
 import useFetchState, { FetchState, NotReadyError } from '~/utilities/useFetchState';
 import useModelServingEnabled from '~/pages/modelServing/useModelServingEnabled';
@@ -13,7 +13,7 @@ const useInferenceServices = (namespace?: string): FetchState
       return Promise.reject(new NotReadyError('Fetch is not ready'));
     }
 
-    return getInferenceServiceContext(namespace, LABEL_SELECTOR_DASHBOARD_RESOURCE);
+    return listInferenceService(namespace, LABEL_SELECTOR_DASHBOARD_RESOURCE);
   }, [namespace, modelServingEnabled]);
 
   return useFetchState<InferenceServiceKind[]>(getServingInferences, []);
diff --git a/frontend/src/pages/modelServing/useServingRuntimes.ts b/frontend/src/pages/modelServing/useServingRuntimes.ts
index d2a3d2c9f9..e4639f1185 100644
--- a/frontend/src/pages/modelServing/useServingRuntimes.ts
+++ b/frontend/src/pages/modelServing/useServingRuntimes.ts
@@ -1,5 +1,5 @@
 import * as React from 'react';
-import { getServingRuntimeContext } from '~/api';
+import { listServingRuntimes } from '~/api';
 import { ServingRuntimeKind } from '~/k8sTypes';
 import useModelServingEnabled from '~/pages/modelServing/useModelServingEnabled';
 import useFetchState, { FetchState, NotReadyError } from '~/utilities/useFetchState';
@@ -20,7 +20,7 @@ const useServingRuntimes = (
       return Promise.reject(new NotReadyError('Fetch is not ready'));
     }
 
-    return getServingRuntimeContext(namespace, LABEL_SELECTOR_DASHBOARD_RESOURCE).catch((e) => {
+    return listServingRuntimes(namespace, LABEL_SELECTOR_DASHBOARD_RESOURCE).catch((e) => {
       if (e.statusObject?.code === 404) {
         throw new Error('Model serving is not properly configured.');
       }