LPA.py
import torch
import torch.nn as nn

# Paper: SwinPA-Net: Swin Transformer-Based Multiscale Feature Pyramid Aggregation
#        Network for Medical Image Segmentation
# Paper link: https://ieeexplore.ieee.org/document/9895210

class ChannelAttention(nn.Module):
    """Channel attention: squeeze spatial dims with avg/max pooling, then excite channels."""
    def __init__(self, in_planes):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Shared bottleneck MLP (implemented with 1x1 convolutions), reduction ratio 8
        self.fc1 = nn.Conv2d(in_planes, in_planes // 8, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // 8, in_planes, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Both pooled descriptors pass through the same MLP, then are summed
        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
        out = avg_out + max_out
        return self.sigmoid(out)

class SpatialAttention(nn.Module):
    """Spatial attention: pool over channels, then convolve to a one-channel spatial mask."""
    def __init__(self, kernel_size=3):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Channel-wise average and max, concatenated into a 2-channel map
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv1(x)
        return self.sigmoid(x)

class LPA(nn.Module):
    """Local Pyramid Attention: apply channel + spatial attention to the four spatial
    quadrants (local branch) and to the full feature map (global branch), then sum."""
    def __init__(self, in_channel):
        super(LPA, self).__init__()
        self.ca = ChannelAttention(in_channel)
        self.sa = SpatialAttention()

    def forward(self, x):
        # Split the feature map into four spatial quadrants
        x0, x1 = x.chunk(2, dim=2)   # top / bottom halves
        x0 = x0.chunk(2, dim=3)      # top-left, top-right
        x1 = x1.chunk(2, dim=3)      # bottom-left, bottom-right
        # Channel attention, then spatial attention, on each quadrant
        x0 = [self.ca(x0[-2]) * x0[-2], self.ca(x0[-1]) * x0[-1]]
        x0 = [self.sa(x0[-2]) * x0[-2], self.sa(x0[-1]) * x0[-1]]
        x1 = [self.ca(x1[-2]) * x1[-2], self.ca(x1[-1]) * x1[-1]]
        x1 = [self.sa(x1[-2]) * x1[-2], self.sa(x1[-1]) * x1[-1]]
        # Stitch the quadrants back together
        x0 = torch.cat(x0, dim=3)
        x1 = torch.cat(x1, dim=3)
        x3 = torch.cat((x0, x1), dim=2)
        # Global branch: the same attention applied to the whole feature map
        x4 = self.ca(x) * x
        x4 = self.sa(x4) * x4
        # Fuse local and global branches
        x = x3 + x4
        return x

if __name__ == '__main__':
    # Quick shape check: LPA preserves both channel and spatial dimensions
    x = torch.rand(1, 28, 64, 64)
    block = LPA(in_channel=28)
    output = block(x)
    print(x.size())
    print(output.size())
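
A minimal sketch of how LPA might be dropped into a decoder stage. The `DecoderBlock` wrapper and the channel counts below are assumptions for illustration only; they are not part of the original file or the paper's released code:

import torch
import torch.nn as nn

class DecoderBlock(nn.Module):
    # Hypothetical wrapper (assumption): 3x3 conv -> LPA -> 3x3 conv
    def __init__(self, in_ch, out_ch):
        super(DecoderBlock, self).__init__()
        self.reduce = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)
        self.lpa = LPA(in_channel=out_ch)
        self.refine = nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1)

    def forward(self, x):
        x = torch.relu(self.reduce(x))
        x = self.lpa(x)  # attention-refined features, same shape in and out
        return torch.relu(self.refine(x))

# Example: refine a 64-channel backbone feature map down to 32 channels
feat = torch.rand(1, 64, 32, 32)
dec = DecoderBlock(in_ch=64, out_ch=32)
print(dec(feat).shape)  # torch.Size([1, 32, 32, 32])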