forked from ai-dawang/PlugNPlay-Modules
-
Notifications
You must be signed in to change notification settings - Fork 0
/
(CVPR 2024)FRFN.py
76 lines (58 loc) · 2.5 KB
/
(CVPR 2024)FRFN.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import torch
import torch.nn as nn
from einops import rearrange
import math
# Paper: Adapt or Perish: Adaptive Sparse Transformer with Attentive Feature Refinement for Image Restoration, CVPR 2024.
# Paper PDF: https://openaccess.thecvf.com/content/CVPR2024/papers/Zhou_Adapt_or_Perish_Adaptive_Sparse_Transformer_with_Attentive_Feature_Refinement_CVPR_2024_paper.pdf
# Plug-and-play module collection (100+ modules): https://github.com/ai-dawang/PlugNPlay-Modules
class FRFN(nn.Module):
    """Feature Refinement Feed-forward Network (FRFN).

    From "Adapt or Perish: Adaptive Sparse Transformer with Attentive Feature
    Refinement for Image Restoration" (CVPR 2024). Operates on a flattened
    square token grid of shape (B, H*W, C): a partial 3x3 conv refines the
    first quarter of the channels, then a gated feed-forward (linear ->
    depthwise conv gate -> linear) is applied, with a residual connection.

    Args:
        dim: channel dimension C of the input tokens.
        hidden_dim: width of the feed-forward hidden layer.
        act_layer: activation module class (default nn.GELU).
    """

    def __init__(self, dim=32, hidden_dim=128, act_layer=nn.GELU):
        super().__init__()
        # Expansion to 2*hidden_dim: one half goes through the depthwise
        # conv branch, the other half acts as the gate.
        self.linear1 = nn.Sequential(nn.Linear(dim, hidden_dim * 2),
                                     act_layer())
        # Depthwise 3x3 conv (groups == channels) on the spatial layout.
        self.dwconv = nn.Sequential(
            nn.Conv2d(hidden_dim, hidden_dim, groups=hidden_dim, kernel_size=3, stride=1, padding=1),
            act_layer())
        self.linear2 = nn.Sequential(nn.Linear(hidden_dim, dim))
        self.dim = dim
        self.hidden_dim = hidden_dim
        # Only the first quarter of the channels is convolved ("partial conv");
        # the remaining channels pass through untouched.
        self.dim_conv = self.dim // 4
        self.dim_untouched = self.dim - self.dim_conv
        self.partial_conv3 = nn.Conv2d(self.dim_conv, self.dim_conv, 3, 1, 1, bias=False)

    def forward(self, x):
        """Apply FRFN to x of shape (B, H*W, C); H*W must be a perfect square.

        Returns a tensor of the same shape as x (residual connection).
        Raises ValueError if the token count is not a perfect square, since
        the module must restore a square H x W feature map.
        """
        x_init = x
        bs, hw, c = x.size()
        hh = int(math.sqrt(hw))
        # int(sqrt(...)) truncates: reject non-square grids instead of
        # silently reshaping to the wrong spatial size.
        if hh * hh != hw:
            raise ValueError(
                f"FRFN expects a square token grid, got {hw} tokens")
        # Spatial restore: b (h w) c -> b c h w
        x = x.permute(0, 2, 1).reshape(bs, c, hh, hh)
        # Partial conv: refine the first dim_conv channels only.
        x1, x2 = torch.split(x, [self.dim_conv, self.dim_untouched], dim=1)
        x1 = self.partial_conv3(x1)
        x = torch.cat((x1, x2), 1)
        # Flatten back: b c h w -> b (h w) c
        x = x.flatten(2).transpose(1, 2)
        x = self.linear1(x)
        # Gate mechanism: depthwise-conv branch modulated by the linear branch.
        x_1, x_2 = x.chunk(2, dim=-1)
        x_1 = x_1.permute(0, 2, 1).reshape(bs, self.hidden_dim, hh, hh)
        x_1 = self.dwconv(x_1)
        x_1 = x_1.flatten(2).transpose(1, 2)
        x = x_1 * x_2
        x = self.linear2(x)
        return x + x_init
if __name__ == '__main__':
    # Smoke test: run a random flattened 64x64 feature map through FRFN
    # and confirm the residual output keeps the input shape.
    feature_dim = 64          # channel dimension C
    batch, height, width = 1, 64, 64
    module = FRFN(feature_dim)
    # Tokens are a flattened square grid: (B, H*W, C).
    sample = torch.randn(batch, height * width, feature_dim)
    result = module(sample)
    print(sample.size())
    print(result.size())