# MixStructure.py
import torch
import torch.nn as nn
# Paper: https://arxiv.org/abs/2305.17654
# The Mix Structure Block pairs a multi-scale parallel large convolution
# kernel module with an enhanced parallel attention module.
class MixStructureBlock(nn.Module):
    def __init__(self, dim):
        super().__init__()

        self.norm1 = nn.BatchNorm2d(dim)
        self.norm2 = nn.BatchNorm2d(dim)

        # Multi-scale parallel large convolution kernel module: a 1x1 conv and
        # a 5x5 local conv feed three parallel dilated depthwise convs. Each
        # name suffix is the effective kernel size d*(k-1)+1 = 19, 13, 7.
        self.conv1 = nn.Conv2d(dim, dim, kernel_size=1)
        self.conv2 = nn.Conv2d(dim, dim, kernel_size=5, padding=2, padding_mode='reflect')
        self.conv3_19 = nn.Conv2d(dim, dim, kernel_size=7, padding=9, groups=dim, dilation=3, padding_mode='reflect')
        self.conv3_13 = nn.Conv2d(dim, dim, kernel_size=5, padding=6, groups=dim, dilation=3, padding_mode='reflect')
        self.conv3_7 = nn.Conv2d(dim, dim, kernel_size=3, padding=3, groups=dim, dilation=3, padding_mode='reflect')

        # Simple pixel attention: a depthwise value branch (Wv) gated by a
        # globally pooled sigmoid branch (Wg).
        self.Wv = nn.Sequential(
            nn.Conv2d(dim, dim, 1),
            nn.Conv2d(dim, dim, kernel_size=3, padding=3 // 2, groups=dim, padding_mode='reflect')
        )
        self.Wg = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim, dim, 1),
            nn.Sigmoid()
        )

        # Channel attention: global average pooling followed by a two-layer
        # gating MLP over channels.
        self.ca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim, dim, 1, padding=0, bias=True),
            nn.GELU(),
            # nn.ReLU(True),
            nn.Conv2d(dim, dim, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

        # Pixel attention: squeeze channels to dim // 8, then predict a
        # single-channel spatial gate.
        self.pa = nn.Sequential(
            nn.Conv2d(dim, dim // 8, 1, padding=0, bias=True),
            nn.GELU(),
            # nn.ReLU(True),
            nn.Conv2d(dim // 8, 1, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

        # Point-wise MLPs that fuse the three concatenated branches
        # (3 * dim channels) back down to dim.
        self.mlp = nn.Sequential(
            nn.Conv2d(dim * 3, dim * 4, 1),
            nn.GELU(),
            # nn.ReLU(True),
            nn.Conv2d(dim * 4, dim, 1)
        )
        self.mlp2 = nn.Sequential(
            nn.Conv2d(dim * 3, dim * 4, 1),
            nn.GELU(),
            # nn.ReLU(True),
            nn.Conv2d(dim * 4, dim, 1)
        )

    def forward(self, x):
        # Multi-scale parallel large convolution kernel module (residual).
        identity = x
        x = self.norm1(x)
        x = self.conv1(x)
        x = self.conv2(x)
        x = torch.cat([self.conv3_19(x), self.conv3_13(x), self.conv3_7(x)], dim=1)
        x = self.mlp(x)
        x = identity + x

        # Enhanced parallel attention module (residual): simple pixel
        # attention, channel attention, and pixel attention run in parallel.
        identity = x
        x = self.norm2(x)
        x = torch.cat([self.Wv(x) * self.Wg(x), self.ca(x) * x, self.pa(x) * x], dim=1)
        x = self.mlp2(x)
        x = identity + x
        return x
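

# A minimal stacking sketch (not part of the original file): in the paper's
# MixDehazeNet, several Mix Structure Blocks are stacked inside each
# encoder/decoder stage. The `MixStage` name and `depth` argument here are
# illustrative assumptions, not identifiers from the original repository.
class MixStage(nn.Module):
    def __init__(self, dim, depth=4):
        super().__init__()
        # Chain `depth` blocks at a fixed channel width; every block
        # preserves both the spatial size and the channel count.
        self.blocks = nn.Sequential(*[MixStructureBlock(dim) for _ in range(depth)])

    def forward(self, x):
        return self.blocks(x)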

if __name__ == '__main__':
    block = MixStructureBlock(dim=64)
    x = torch.rand(1, 64, 128, 128)  # B C H W
    y = block(x)
    print(x.size())  # torch.Size([1, 64, 128, 128])
    print(y.size())  # same shape: the block preserves B, C, H, W
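
    # Hypothetical sanity check (an addition, not in the original file): the
    # _19/_13/_7 suffixes of the dilated depthwise convs match their effective
    # kernel size k_eff = d * (k - 1) + 1, and padding = (k_eff - 1) // 2
    # keeps the spatial resolution unchanged.
    for k, d in [(7, 3), (5, 3), (3, 3)]:
        k_eff = d * (k - 1) + 1
        print(f"kernel={k}, dilation={d} -> effective kernel {k_eff}, padding {(k_eff - 1) // 2}")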