Add block replace transformer_options to flux.
comfyanonymous committed Nov 12, 2024
1 parent a72d152 commit 8ebf2d8
Showing 2 changed files with 29 additions and 6 deletions.
30 changes: 26 additions & 4 deletions comfy/ldm/flux/model.py
@@ -96,7 +96,9 @@ def forward_orig(
         y: Tensor,
         guidance: Tensor = None,
         control=None,
+        transformer_options={},
     ) -> Tensor:
+        patches_replace = transformer_options.get("patches_replace", {})
         if img.ndim != 3 or txt.ndim != 3:
             raise ValueError("Input img and txt tensors must have 3 dimensions.")

@@ -114,8 +116,19 @@ def forward_orig(
         ids = torch.cat((txt_ids, img_ids), dim=1)
         pe = self.pe_embedder(ids)

+        blocks_replace = patches_replace.get("dit", {})
         for i, block in enumerate(self.double_blocks):
-            img, txt = block(img=img, txt=txt, vec=vec, pe=pe)
+            if ("double_block", i) in blocks_replace:
+                def block_wrap(args):
+                    out = {}
+                    out["img"], out["txt"] = block(img=args["img"], txt=args["txt"], vec=args["vec"], pe=args["pe"])
+                    return out
+
+                out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": vec, "pe": pe}, {"original_block": block_wrap})
+                txt = out["txt"]
+                img = out["img"]
+            else:
+                img, txt = block(img=img, txt=txt, vec=vec, pe=pe)

         if control is not None: # Controlnet
             control_i = control.get("input")
@@ -127,7 +140,16 @@ def forward_orig(
         img = torch.cat((txt, img), 1)

         for i, block in enumerate(self.single_blocks):
-            img = block(img, vec=vec, pe=pe)
+            if ("single_block", i) in blocks_replace:
+                def block_wrap(args):
+                    out = {}
+                    out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"])
+                    return out
+
+                out = blocks_replace[("single_block", i)]({"img": img, "vec": vec, "pe": pe}, {"original_block": block_wrap})
+                img = out["img"]
+            else:
+                img = block(img, vec=vec, pe=pe)

         if control is not None: # Controlnet
             control_o = control.get("output")
@@ -141,7 +163,7 @@ def forward_orig(
         img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels)
         return img

-    def forward(self, x, timestep, context, y, guidance, control=None, **kwargs):
+    def forward(self, x, timestep, context, y, guidance, control=None, transformer_options={}, **kwargs):
         bs, c, h, w = x.shape
         patch_size = 2
         x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size))
@@ -156,5 +178,5 @@ def forward(self, x, timestep, context, y, guidance, control=None, **kwargs):
         img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)

         txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
-        out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control)
+        out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control, transformer_options)
         return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)[:,:,:h,:w]
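
With this change, forward_orig consults transformer_options["patches_replace"]["dit"] for callables keyed by ("double_block", i) or ("single_block", i); each patch receives the block's inputs plus an "original_block" wrapper and must return a dict with the same output keys. Below is a minimal sketch of a conforming patch, inferred from the call sites above; the patch function, the scaling tweak, and the block index are illustrative, not part of the commit.

# Sketch only: the shape of a "dit" block-replace patch as invoked by forward_orig above.
def my_double_block_patch(args, extra):
    # args holds this block's inputs: {"img": ..., "txt": ..., "vec": ..., "pe": ...}
    # extra["original_block"] runs the unpatched block and returns {"img": ..., "txt": ...}
    out = extra["original_block"](args)
    out["img"] = out["img"] * 1.05  # placeholder edit to the image stream
    return out

transformer_options = {
    "patches_replace": {
        "dit": {
            ("double_block", 3): my_double_block_patch,  # block index chosen arbitrarily
        }
    }
}

In practice this dictionary is usually populated through ComfyUI's ModelPatcher patch helpers rather than built by hand.
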
5 changes: 3 additions & 2 deletions comfy_extras/nodes_sd3.py
@@ -128,6 +128,9 @@ def skip(args, extra_args):
         sigma_start = model_sampling.percent_to_sigma(start_percent)
         sigma_end = model_sampling.percent_to_sigma(end_percent)

+        layers = re.findall(r'\d+', layers)
+        layers = [int(i) for i in layers]
+
         def post_cfg_function(args):
             model = args["model"]
             cond_pred = args["cond_denoised"]
@@ -147,8 +150,6 @@ def post_cfg_function(args):
                 cfg_result = cfg_result + (cond_pred - slg) * scale
             return cfg_result

-        layers = re.findall(r'\d+', layers)
-        layers = [int(i) for i in layers]
         m = model.clone()
         m.set_model_sampler_post_cfg_function(post_cfg_function)

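The nodes_sd3.py change moves the layer-string parsing ahead of the post_cfg_function definition; the parsing itself is unchanged. A standalone illustration of what it does, with a made-up input string:

import re

layers = "7, 8, 9"  # hypothetical layer list as entered in the node
layers = [int(i) for i in re.findall(r'\d+', layers)]
print(layers)  # [7, 8, 9]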
