diff --git a/backend/src/nodes/impl/pytorch/utils.py b/backend/src/nodes/impl/pytorch/utils.py index 74f13405a6..941cd9d443 100644 --- a/backend/src/nodes/impl/pytorch/utils.py +++ b/backend/src/nodes/impl/pytorch/utils.py @@ -38,6 +38,7 @@ def to_pytorch_execution_options(options: ExecutionOptions): onnx_tensorrt_cache_path=options.onnx_tensorrt_cache_path, onnx_should_tensorrt_fp16=options.onnx_should_tensorrt_fp16, reserved_system_memory=options.reserved_system_memory, + available_memory_only=options.available_memory_only, ) diff --git a/backend/src/nodes/utils/exec_options.py b/backend/src/nodes/utils/exec_options.py index b1139e74d3..c177c5fd6a 100644 --- a/backend/src/nodes/utils/exec_options.py +++ b/backend/src/nodes/utils/exec_options.py @@ -17,6 +17,7 @@ def __init__( onnx_tensorrt_cache_path: str, onnx_should_tensorrt_fp16: bool, reserved_system_memory: int, + available_memory_only: bool, ) -> None: self.__device = device self.__fp16 = fp16 @@ -28,6 +29,7 @@ def __init__( self.__onnx_tensorrt_cache_path = onnx_tensorrt_cache_path self.__onnx_should_tensorrt_fp16 = onnx_should_tensorrt_fp16 self.__reserved_system_memory = reserved_system_memory + self.__available_memory_only = available_memory_only if ( not os.path.exists(onnx_tensorrt_cache_path) @@ -43,7 +45,7 @@ def __init__( f" {onnx_should_tensorrt_cache}, tensorrt_cache_path:" f" {onnx_tensorrt_cache_path}, should_tensorrt_fp16:" f" {onnx_should_tensorrt_fp16}, reserved_system_memory:" - f" {reserved_system_memory}" + f" {reserved_system_memory}, available_memory_only: {available_memory_only}" ) @property @@ -88,9 +90,13 @@ def onnx_should_tensorrt_fp16(self): def reserved_system_memory(self): return self.__reserved_system_memory + @property + def available_memory_only(self): + return self.__available_memory_only + __global_exec_options = ExecutionOptions( - "cpu", False, 0, 0, 0, "CPUExecutionProvider", False, "", False, 1024 + "cpu", False, 0, 0, 0, "CPUExecutionProvider", False, "", False, 1024, 
False ) @@ -116,6 +122,7 @@ class JsonExecutionOptions(TypedDict): onnxTensorRtCachePath: str onnxShouldTensorRtFp16: bool reservedSystemMemory: int + availableMemoryOnly: bool def parse_execution_options(json: JsonExecutionOptions) -> ExecutionOptions: @@ -130,4 +137,5 @@ def parse_execution_options(json: JsonExecutionOptions) -> ExecutionOptions: onnx_tensorrt_cache_path=json["onnxTensorRtCachePath"], onnx_should_tensorrt_fp16=json["onnxShouldTensorRtFp16"], reserved_system_memory=json["reservedSystemMemory"], + available_memory_only=json["availableMemoryOnly"], ) diff --git a/backend/src/packages/chaiNNer_pytorch/pytorch/processing/upscale_image.py b/backend/src/packages/chaiNNer_pytorch/pytorch/processing/upscale_image.py index b32933d079..be839fba41 100644 --- a/backend/src/packages/chaiNNer_pytorch/pytorch/processing/upscale_image.py +++ b/backend/src/packages/chaiNNer_pytorch/pytorch/processing/upscale_image.py @@ -59,10 +59,16 @@ def estimate(): if is_arm_mac: total_memory = psutil.virtual_memory().total + available_memory = psutil.virtual_memory().available reserved_system_memory = options.reserved_system_memory * (1024**2) pre_budget = int(total_memory - reserved_system_memory) - budget = max(total_memory * 0.2, min(pre_budget, total_memory * 0.8)) + if options.available_memory_only: + budget = available_memory * 0.8 + else: + budget = max( + total_memory * 0.2, min(pre_budget, total_memory * 0.8) + ) return MaxTileSize( estimate_tile_size( diff --git a/src/common/Backend.ts b/src/common/Backend.ts index 47a587eadb..015fb38f6c 100644 --- a/src/common/Backend.ts +++ b/src/common/Backend.ts @@ -78,6 +78,7 @@ export interface BackendExecutionOptions { onnxTensorRtCachePath: string; onnxShouldTensorRtFp16: boolean; reservedSystemMemory: number; + availableMemoryOnly: boolean; } export interface BackendRunRequest { data: BackendJsonNode[]; diff --git a/src/main/cli/run.ts b/src/main/cli/run.ts index 95d93da189..d6e6fb85ab 100644 --- a/src/main/cli/run.ts +++ 
b/src/main/cli/run.ts @@ -139,6 +139,7 @@ const getExecutionOptions = (): BackendExecutionOptions => { onnxTensorRtCachePath: getOnnxTensorRtCacheLocation(app.getPath('userData')), onnxShouldTensorRtFp16: getSetting('onnx-should-tensorrt-fp16', false), reservedSystemMemory: getSetting('reserved-system-memory', 0), + availableMemoryOnly: getSetting('use-available-memory-only', false), }; }; diff --git a/src/renderer/components/SettingsModal.tsx b/src/renderer/components/SettingsModal.tsx index 4eed89102b..7e863a2516 100644 --- a/src/renderer/components/SettingsModal.tsx +++ b/src/renderer/components/SettingsModal.tsx @@ -271,7 +271,8 @@ const AppearanceSettings = memo(() => { }); const EnvironmentSettings = memo(() => { - const { useStartupTemplate, useReservedSystemMemory } = useContext(SettingsContext); + const { useStartupTemplate, useReservedSystemMemory, useAvailableMemoryOnly } = + useContext(SettingsContext); const [startupTemplate, setStartupTemplate] = useStartupTemplate; @@ -299,6 +300,7 @@ const EnvironmentSettings = memo(() => { }, [startupTemplate, lastDirectory, setStartupTemplate]); const [reservedSystemMemory, setReservedSystemMemory] = useReservedSystemMemory; + const [availableMemoryOnly, setAvailableMemoryOnly] = useAvailableMemoryOnly; // Maximum amount reserved for the system is 80 % of the total memory const calculateMaxValue = () => (totalMemory / 1024 ** 2) * 0.8; @@ -319,6 +321,7 @@ const EnvironmentSettings = memo(() => { > { paddingRight="3.7rem" textAlign="right" /> - MB + {availableMemoryOnly ? ( + + MB + + ) : ( + MB + )} @@ -347,6 +359,20 @@ const EnvironmentSettings = memo(() => { ) : ( [] )} + + {isArmMac ? 
( + { + setAvailableMemoryOnly((prev) => !prev); + }} + /> + ) : ( + [] + )} + ; useReservedSystemMemory: GetSetState; + useAvailableMemoryOnly: GetSetState; useSelectTheme: GetSetState; useAnimateChain: GetSetState; useExperimentalFeatures: GetSetState; @@ -72,6 +73,9 @@ export const SettingsProvider = memo(({ children }: React.PropsWithChildren { useOnnxShouldTensorRtCache, useOnnxShouldTensorRtFp16, useReservedSystemMemory, + useAvailableMemoryOnly, } = useContext(SettingsContext); const [isCpu] = useIsCpu; @@ -39,6 +40,7 @@ export const useBackendExecutionOptions = (): BackendExecutionOptions => { const [onnxShouldTensorRtFp16] = useOnnxShouldTensorRtFp16; const [reservedSystemMemory] = useReservedSystemMemory; + const [availableMemoryOnly] = useAvailableMemoryOnly; return { isCpu, @@ -51,5 +53,6 @@ export const useBackendExecutionOptions = (): BackendExecutionOptions => { onnxTensorRtCachePath, onnxShouldTensorRtFp16, reservedSystemMemory, + availableMemoryOnly, }; };