Skip to content

Commit

Permalink
Change torch.xpu to ipex.optimize, xpu device initialization and remove workaround for text node issue from older IPEX. (#3388)
Browse files Browse the repository at this point in the history
  • Loading branch information
simonlui authored May 2, 2024
1 parent f81a6fa commit a56d02e
Showing 1 changed file with 2 additions and 4 deletions.
6 changes: 2 additions & 4 deletions comfy/model_management.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ def get_torch_device():
return torch.device("cpu")
else:
if is_intel_xpu():
return torch.device("xpu")
return torch.device("xpu", torch.xpu.current_device())
else:
return torch.device(torch.cuda.current_device())

Expand Down Expand Up @@ -304,7 +304,7 @@ def model_load(self, lowvram_model_memory=0):
raise e

if is_intel_xpu() and not args.disable_ipex_optimize:
self.real_model = torch.xpu.optimize(self.real_model.eval(), inplace=True, auto_kernel_selection=True, graph_mode=True)
self.real_model = ipex.optimize(self.real_model.eval(), graph_mode=True, concat_linear=True)

self.weights_loaded = True
return self.real_model
Expand Down Expand Up @@ -552,8 +552,6 @@ def text_encoder_device():
if args.gpu_only:
return get_torch_device()
elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
if is_intel_xpu():
return torch.device("cpu")
if should_use_fp16(prioritize_performance=False):
return get_torch_device()
else:
Expand Down

0 comments on commit a56d02e

Please sign in to comment.