Commit 44bbd54

Fix incorrect defaults for use_combined_linear and add default for use_flash_attention in docstring
Parent: fd13c1b

1 file changed

monai/networks/nets/diffusion_model_unet.py

Lines changed: 2 additions & 2 deletions
@@ -1527,9 +1527,9 @@ class DiffusionModelUNet(nn.Module):
         upcast_attention: if True, upcast attention operations to full precision.
         dropout_cattn: if different from zero, this will be the dropout value for the cross-attention layers.
         include_fc: whether to include the final linear layer. Default to True.
-        use_combined_linear: whether to use a single linear layer for qkv projection, default to True.
+        use_combined_linear: whether to use a single linear layer for qkv projection, default to False.
         use_flash_attention: if True, use Pytorch's inbuilt flash attention for a memory efficient attention mechanism
-            (see https://pytorch.org/docs/2.2/generated/torch.nn.functional.scaled_dot_product_attention.html).
+            (see https://pytorch.org/docs/2.2/generated/torch.nn.functional.scaled_dot_product_attention.html), default to False.
     """

     def __init__(
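
For context, here is a minimal PyTorch sketch of what the two docstring flags refer to. This is illustrative only, not MONAI's internal attention code; the tensor shapes and layer names are assumptions made for the example.

import torch
import torch.nn as nn
import torch.nn.functional as F

embed_dim = 64
x = torch.randn(2, 16, embed_dim)  # (batch, tokens, channels); shapes are illustrative

# use_combined_linear=True: a single linear layer produces q, k, v in one matmul.
qkv_proj = nn.Linear(embed_dim, 3 * embed_dim)
q, k, v = qkv_proj(x).chunk(3, dim=-1)

# use_combined_linear=False (the corrected default): three separate projections.
to_q, to_k, to_v = (nn.Linear(embed_dim, embed_dim) for _ in range(3))
q, k, v = to_q(x), to_k(x), to_v(x)

# use_flash_attention=True routes attention through PyTorch's fused kernel,
# torch.nn.functional.scaled_dot_product_attention (available since PyTorch 2.0).
out = F.scaled_dot_product_attention(q, k, v)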
