Skip to content

Commit 58a93b3

Browse files
committed
source
0 parents  commit 58a93b3

File tree

2 files changed

+380
-0
lines changed

2 files changed

+380
-0
lines changed

MV3_1_true_2.py

Lines changed: 380 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,380 @@
1+
import torch.nn as nn
2+
import torch.utils.model_zoo as model_zoo
3+
4+
import torch
5+
from torch.nn import functional as F
6+
7+
# URLs of pretrained backbone checkpoints, keyed by '<depth>_<dataset>'.
# The CAN* constructor functions below fetch these via maybe_download().
models_urls = {
    '101_voc': 'https://cloudstor.aarnet.edu.au/plus/s/Owmttk9bdPROwc6/download',
    '18_imagenet': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    '34_imagenet': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    '50_imagenet': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    '152_imagenet': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    '101_imagenet': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
}
15+
16+
17+
def maybe_download(model_name, model_url, model_dir=None, map_location=None):
    """Download a checkpoint into the local model cache (if absent) and load it.

    Args:
        model_name: Cache key; the file is stored as '<model_name>.pth.tar'.
        model_url: URL to fetch the checkpoint from on a cache miss.
        model_dir: Cache directory. Defaults to $TORCH_MODEL_ZOO, falling back
            to $TORCH_HOME/models and finally ~/.torch/models.
        map_location: Forwarded to torch.load (e.g. 'cpu').

    Returns:
        Whatever torch.load deserializes from the cached file.
    """
    import os
    import sys
    # Stdlib urllib replaces the original third-party six.moves shim.
    from urllib import request

    if model_dir is None:
        torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
        model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
    # exist_ok avoids the check-then-create race when several processes
    # initialize the cache directory concurrently.
    os.makedirs(model_dir, exist_ok=True)
    cached_file = os.path.join(model_dir, '{}.pth.tar'.format(model_name))
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(model_url, cached_file))
        request.urlretrieve(model_url, cached_file)
    return torch.load(cached_file, map_location=map_location)
32+
33+
34+
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
38+
39+
40+
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
43+
44+
def conv3x3_bn(in_channel, out_channel):
    """3x3 convolution followed by BatchNorm and ReLU.

    The original version contained no BatchNorm despite the '_bn' name —
    the author's own '## wrong!! change ReLU to BN' comment flagged it.
    The normalization layer is restored here (Conv -> BN -> ReLU).
    """
    return nn.Sequential(
        nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False),
        nn.BatchNorm2d(out_channel),
        nn.ReLU(inplace=True),
    )
48+
49+
50+
class GAU(nn.Module):
    """Global Attention Upsample fusion module.

    Upsamples a low-resolution (deep) feature map to the spatial size of a
    high-resolution (shallow) one, concatenates the two, projects back to
    `out_size` channels, gates the result with its own global-average-pooled
    descriptor, and adds the high-resolution input as a residual.
    """

    def __init__(self, in_size, out_size):
        super(GAU, self).__init__()
        self.in_size = in_size
        self.out_size = out_size
        # 1x1 projection applied to the concatenation (in_size*2 -> out_size).
        self.conv = nn.Conv2d(in_size * 2, out_size, kernel_size=1, stride=1, bias=False)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # BUGFIX: this BN normalizes self.conv's output, which has out_size
        # channels; the original used BatchNorm2d(in_size) and only ran
        # because the model always builds GAU(512, 512). Identical for
        # in_size == out_size, so existing checkpoints still load.
        self.bn = nn.BatchNorm2d(out_size)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_low, input_high):
        high_size = input_high.size()[2:]
        # F.interpolate replaces the deprecated F.upsample;
        # align_corners=False matches the current default behaviour.
        upsample_low = F.interpolate(input_low, high_size, mode='bilinear',
                                     align_corners=False)
        fused = torch.cat([upsample_low, input_high], dim=1)
        fused = self.relu(self.bn(self.conv(fused)))

        # Channel attention: gate the fused map by its global descriptor,
        # then add the high-resolution input as a residual.
        gp = self.avg_pool(fused)
        return gp * fused + input_high
77+
78+
79+
class BasicBlock(nn.Module):
    """Two-layer residual block (ResNet-18/34 style)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: the input itself, or its projection when the
        # block changes resolution/width.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        y += shortcut
        return self.relu(y)
109+
110+
111+
class Bottleneck(nn.Module):
    """Three-layer bottleneck residual block (ResNet-50/101/152 style)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or projection when shape changes.
        shortcut = x if self.downsample is None else self.downsample(x)

        # Squeeze (1x1) -> spatial (3x3) -> expand (1x1).
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        y += shortcut
        return self.relu(y)
147+
148+
149+
class RefineBlock(nn.Module):
    """Project a feature map to 512 channels, then refine it with a small
    residual conv-bn-relu-conv branch."""

    def __init__(self, in_channel):
        super(RefineBlock, self).__init__()
        self.c1 = nn.Conv2d(in_channel, 512, kernel_size=1, stride=1, padding=0, bias=False)
        self.c3_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(512)
        self.relu = nn.ReLU(inplace=True)
        self.c3_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, x):
        projected = self.c1(x)
        refined = self.c3_2(self.relu(self.bn(self.c3_1(projected))))
        # Residual connection around the refinement branch.
        return projected + refined
167+
168+
169+
class FPA(nn.Module):
    """Feature Pyramid Attention head.

    Runs four parallel two-conv branches with large kernels (15/11/7/3) plus
    a global-average-pool branch, then concatenates the five out_channel-wide
    results, so the output has 5 * out_channel channels at the input's
    spatial size.

    NOTE(review): a single BatchNorm2d (self.bn) is shared by every branch,
    so all branches mix their statistics into one set of running estimates.
    That matches the original behaviour (and its checkpoint layout) and is
    kept deliberately; per-branch norms would be the conventional design.
    """

    def __init__(self, in_channel, out_channel):
        super(FPA, self).__init__()

        # First conv of each kernel-size branch (in_channel -> out_channel).
        self.c15_1 = nn.Conv2d(in_channel, out_channel, kernel_size=15, stride=1, padding=7, bias=False)
        self.c11_1 = nn.Conv2d(in_channel, out_channel, kernel_size=11, stride=1, padding=5, bias=False)
        self.c7_1 = nn.Conv2d(in_channel, out_channel, kernel_size=7, stride=1, padding=3, bias=False)
        self.c3_1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False)

        # Second conv of each branch. BUGFIX: these consume the first conv's
        # output, which has out_channel channels; the original declared them
        # with in_channel inputs and only ran because FPA is built 512->512.
        # Identical for in_channel == out_channel, so checkpoints still load.
        self.c15_2 = nn.Conv2d(out_channel, out_channel, kernel_size=15, stride=1, padding=7, bias=False)
        self.c11_2 = nn.Conv2d(out_channel, out_channel, kernel_size=11, stride=1, padding=5, bias=False)
        self.c7_2 = nn.Conv2d(out_channel, out_channel, kernel_size=7, stride=1, padding=3, bias=False)
        self.c3_2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False)

        # Global-pooling branch: 1x1 descriptor projected to out_channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.c1_gpb = nn.Conv2d(in_channel, out_channel, kernel_size=1, bias=False)

        self.bn = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        input_size = x.size()[2:]

        x15_1 = self.c15_1(x)
        x15_1 = self.bn(x15_1)
        x15_1 = self.relu(x15_1)
        x15_2 = self.c15_2(x15_1)
        x15_2 = self.bn(x15_2)

        x11_1 = self.c11_1(x)
        x11_1 = self.bn(x11_1)
        x11_1 = self.relu(x11_1)
        x11_2 = self.c11_2(x11_1)
        x11_2 = self.bn(x11_2)

        x7_1 = self.c7_1(x)
        x7_1 = self.bn(x7_1)
        x7_1 = self.relu(x7_1)
        x7_2 = self.c7_2(x7_1)
        x7_2 = self.bn(x7_2)

        x3_1 = self.c3_1(x)
        x3_1 = self.bn(x3_1)
        x3_1 = self.relu(x3_1)
        x3_2 = self.c3_2(x3_1)
        x3_2 = self.bn(x3_2)

        # Broadcast the global descriptor back to the input resolution.
        # F.interpolate replaces the deprecated F.upsample.
        x_gp = self.avg_pool(x)
        x_gp = self.c1_gpb(x_gp)
        x_gp = self.bn(x_gp)
        x_gp = F.interpolate(x_gp, size=input_size, mode='bilinear',
                             align_corners=False)

        out = torch.cat([x_gp, x15_2, x11_2, x7_2, x3_2], dim=1)
        return out
223+
224+
225+
class CAN(nn.Module):
    """Context Aggregation Network for semantic segmentation.

    A ResNet encoder (built from `block`/`layers` in the torchvision style)
    whose four stage outputs are each projected to 512 channels by
    RefineBlocks. The deepest stage passes through an FPA head, then GAU
    modules fuse features back up the pyramid; a final 3x3 conv predicts
    per-pixel class scores, bilinearly upsampled to the input resolution.

    Args:
        block: Residual block class (BasicBlock or Bottleneck).
        layers: Number of blocks in each of the four stages.
        num_classes: Number of output segmentation classes.
    """

    def __init__(self, block, layers, num_classes=1000):
        super(CAN, self).__init__()
        # self.do = nn.Dropout(p=0.5)

        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # BUGFIX: RefineBlock input widths are derived from block.expansion
        # instead of being hard-coded for Bottleneck (expansion 4). For
        # Bottleneck this yields the original 256/512/1024/2048, so existing
        # checkpoints are unaffected; with BasicBlock (CAN18/CAN34) the
        # hard-coded values caused a channel-mismatch crash.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.rb1_1 = RefineBlock(64 * block.expansion)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.rb2_1 = RefineBlock(128 * block.expansion)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.rb3_1 = RefineBlock(256 * block.expansion)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.rb4_1 = RefineBlock(512 * block.expansion)

        # self.fpa=FPA(2048,512)
        self.fpa = FPA(512, 512)
        # FPA concatenates five 512-channel branches -> 512*5 input channels.
        self.rb4_2 = RefineBlock(512 * 5)

        self.fuse43 = GAU(512, 512)
        self.rb3_2 = RefineBlock(512)
        self.fuse32 = GAU(512, 512)
        self.rb2_2 = RefineBlock(512)
        self.fuse21 = GAU(512, 512)
        self.rb1_2 = RefineBlock(512)

        self.class_conv = nn.Conv2d(512, num_classes, kernel_size=3, stride=1,
                                    padding=1, bias=True)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; project the shortcut when the
        spatial size or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        ori_size = x.size()[2:]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        # Encoder stages.
        l1 = self.layer1(x)
        l2 = self.layer2(l1)
        l3 = self.layer3(l2)
        l4 = self.layer4(l3)

        # Project every stage to a common 512-channel width.
        l1 = self.rb1_1(l1)
        l2 = self.rb2_1(l2)
        l3 = self.rb3_1(l3)
        l4 = self.rb4_1(l4)

        l4 = self.fpa(l4)
        l4 = self.rb4_2(l4)

        # Top-down fusion: deeper features guide the shallower ones.
        x_fuse43 = self.rb3_2(self.fuse43(l4, l3))
        x_fuse32 = self.rb2_2(self.fuse32(x_fuse43, l2))
        x_fuse21 = self.rb1_2(self.fuse21(x_fuse32, l1))

        # x_fuse21=self.do(x_fuse21)
        x = self.class_conv(x_fuse21)
        # F.interpolate replaces the deprecated F.upsample.
        x = F.interpolate(x, ori_size, mode='bilinear', align_corners=False)

        return x
315+
316+
317+
def CAN18(num_classes, pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = CAN(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, **kwargs)
    if pretrained:
        key = '18_imagenet'
        # Non-strict load: only the backbone weights exist in the checkpoint.
        model.load_state_dict(maybe_download(key, models_urls[key]), strict=False)
    return model
328+
329+
330+
def CAN34(num_classes, pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = CAN(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, **kwargs)
    if pretrained:
        key = '34_imagenet'
        # Non-strict load: only the backbone weights exist in the checkpoint.
        model.load_state_dict(maybe_download(key, models_urls[key]), strict=False)
    return model
341+
342+
343+
def CAN50(num_classes, pretrained=True, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    # Note: unlike the other constructors, pretrained defaults to True here.
    model = CAN(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, **kwargs)
    if pretrained:
        key = '50_imagenet'
        # Non-strict load: only the backbone weights exist in the checkpoint.
        model.load_state_dict(maybe_download(key, models_urls[key]), strict=False)
        print("load imagenet res50")
    return model
355+
356+
357+
def CAN101(num_classes, pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = CAN(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, **kwargs)
    if pretrained:
        key = '101_imagenet'
        # Non-strict load: only the backbone weights exist in the checkpoint.
        model.load_state_dict(maybe_download(key, models_urls[key]), strict=False)
    return model
368+
369+
370+
def CAN152(num_classes, pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = CAN(Bottleneck, [3, 8, 36, 3], num_classes=num_classes, **kwargs)
    if pretrained:
        key = '152_imagenet'
        # Non-strict load: only the backbone weights exist in the checkpoint.
        model.load_state_dict(maybe_download(key, models_urls[key]), strict=False)
    return model

README.md

Whitespace-only changes.

0 commit comments

Comments
 (0)