Skip to content

Commit

Permalink
Fix performance issues in model serving global context
Browse files Browse the repository at this point in the history
  • Loading branch information
lucferbux committed Nov 16, 2023
1 parent 69b1c16 commit 16fd7b4
Show file tree
Hide file tree
Showing 4 changed files with 4 additions and 51 deletions.
26 changes: 0 additions & 26 deletions frontend/src/api/k8s/inferenceServices.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ import { InferenceServiceKind, K8sAPIOptions, K8sStatus, KnownLabels } from '~/k
import { CreatingInferenceServiceObject } from '~/pages/modelServing/screens/types';
import { translateDisplayNameForK8s } from '~/pages/projects/utils';
import { applyK8sAPIOptions } from '~/api/apiMergeUtils';
import { getModelServingProjects } from './projects';

export const assembleInferenceService = (
data: CreatingInferenceServiceObject,
Expand Down Expand Up @@ -77,31 +76,6 @@ export const listInferenceService = (
}).then((listResource) => listResource.items);
};

/**
 * Lists dashboard inference services across every model-serving project.
 * Within each project the results are de-duplicated by resource name
 * (first occurrence wins) before the per-project lists are merged into a
 * single flat array.
 */
export const listScopedInferenceService = (
  labelSelector?: string,
): Promise<InferenceServiceKind[]> =>
  getModelServingProjects().then((projects) =>
    Promise.all(
      projects.map((project) => listInferenceService(project.metadata.name, labelSelector)),
    ).then((perProject) =>
      perProject.flatMap((services) => {
        // Keep only the first service seen for each name within this project.
        const byName = new Map<string, InferenceServiceKind>();
        services.forEach((service) => {
          if (!byName.has(service.metadata.name)) {
            byName.set(service.metadata.name, service);
          }
        });
        return [...byName.values()];
      }),
    ),
  );

/**
 * Fetches inference services for the model-serving context.
 * When a namespace is given, queries only that namespace; otherwise
 * aggregates results across all model-serving projects.
 */
export const getInferenceServiceContext = (
  namespace?: string,
  labelSelector?: string,
): Promise<InferenceServiceKind[]> =>
  namespace
    ? listInferenceService(namespace, labelSelector)
    : listScopedInferenceService(labelSelector);

export const getInferenceService = (
name: string,
namespace: string,
Expand Down
21 changes: 0 additions & 21 deletions frontend/src/api/k8s/servingRuntimes.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import * as _ from 'lodash';
import {
k8sCreateResource,
k8sDeleteResource,
Expand All @@ -19,7 +18,6 @@ import { getModelServingRuntimeName } from '~/pages/modelServing/utils';
import { getDisplayNameFromK8sResource, translateDisplayNameForK8s } from '~/pages/projects/utils';
import { applyK8sAPIOptions } from '~/api/apiMergeUtils';
import { AcceleratorState } from '~/utilities/useAcceleratorState';
import { getModelServingProjects } from './projects';
import { assemblePodSpecOptions, getshmVolume, getshmVolumeMount } from './utils';

export const assembleServingRuntime = (
Expand Down Expand Up @@ -148,25 +146,6 @@ export const listServingRuntimes = (
}).then((listResource) => listResource.items);
};

/**
 * Lists dashboard serving runtimes across every model-serving project.
 * The combined results are de-duplicated by resource name across all
 * projects (first occurrence wins).
 */
export const listScopedServingRuntimes = (labelSelector?: string): Promise<ServingRuntimeKind[]> =>
  getModelServingProjects().then((projects) =>
    Promise.all(
      projects.map((project) => listServingRuntimes(project.metadata.name, labelSelector)),
    ).then((perProject) => {
      // Flatten all per-project lists, then keep the first runtime seen per name.
      const byName = new Map<string, ServingRuntimeKind>();
      perProject.flat().forEach((runtime) => {
        if (!byName.has(runtime.metadata.name)) {
          byName.set(runtime.metadata.name, runtime);
        }
      });
      return [...byName.values()];
    }),
  );

/**
 * Fetches serving runtimes for the model-serving context.
 * When a namespace is given, queries only that namespace; otherwise
 * aggregates results across all model-serving projects.
 */
export const getServingRuntimeContext = (
  namespace?: string,
  labelSelector?: string,
): Promise<ServingRuntimeKind[]> =>
  namespace
    ? listServingRuntimes(namespace, labelSelector)
    : listScopedServingRuntimes(labelSelector);

export const getServingRuntime = (name: string, namespace: string): Promise<ServingRuntimeKind> =>
k8sGetResource<ServingRuntimeKind>({
model: ServingRuntimeModel,
Expand Down
4 changes: 2 additions & 2 deletions frontend/src/pages/modelServing/useInferenceServices.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import * as React from 'react';
import { getInferenceServiceContext } from '~/api';
import { listInferenceService } from '~/api';
import { InferenceServiceKind } from '~/k8sTypes';
import useFetchState, { FetchState, NotReadyError } from '~/utilities/useFetchState';
import useModelServingEnabled from '~/pages/modelServing/useModelServingEnabled';
Expand All @@ -13,7 +13,7 @@ const useInferenceServices = (namespace?: string): FetchState<InferenceServiceKi
return Promise.reject(new NotReadyError('Model serving is not enabled'));
}

return getInferenceServiceContext(namespace, LABEL_SELECTOR_DASHBOARD_RESOURCE);
return listInferenceService(namespace, LABEL_SELECTOR_DASHBOARD_RESOURCE);
}, [namespace, modelServingEnabled]);

return useFetchState<InferenceServiceKind[]>(getServingInferences, []);
Expand Down
4 changes: 2 additions & 2 deletions frontend/src/pages/modelServing/useServingRuntimes.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import * as React from 'react';
import { getServingRuntimeContext } from '~/api';
import { listServingRuntimes } from '~/api';
import { ServingRuntimeKind } from '~/k8sTypes';
import useModelServingEnabled from '~/pages/modelServing/useModelServingEnabled';
import useFetchState, { FetchState, NotReadyError } from '~/utilities/useFetchState';
Expand All @@ -20,7 +20,7 @@ const useServingRuntimes = (
return Promise.reject(new NotReadyError('Fetch is not ready'));
}

return getServingRuntimeContext(namespace, LABEL_SELECTOR_DASHBOARD_RESOURCE).catch((e) => {
return listServingRuntimes(namespace, LABEL_SELECTOR_DASHBOARD_RESOURCE).catch((e) => {
if (e.statusObject?.code === 404) {
throw new Error('Model serving is not properly configured.');
}
Expand Down

0 comments on commit 16fd7b4

Please sign in to comment.