rec_resnet_fpn.py

# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from paddle import nn, ParamAttr
from paddle.nn import functional as F
import paddle.fluid as fluid
import paddle
import numpy as np

__all__ = ["ResNetFPN"]


class ResNetFPN(nn.Layer):
    def __init__(self, in_channels=1, layers=50, **kwargs):
        super(ResNetFPN, self).__init__()
        supported_layers = {
            18: {
                'depth': [2, 2, 2, 2],
                'block_class': BasicBlock
            },
            34: {
                'depth': [3, 4, 6, 3],
                'block_class': BasicBlock
            },
            50: {
                'depth': [3, 4, 6, 3],
                'block_class': BottleneckBlock
            },
            101: {
                'depth': [3, 4, 23, 3],
                'block_class': BottleneckBlock
            },
            152: {
                'depth': [3, 8, 36, 3],
                'block_class': BottleneckBlock
            }
        }
        # The first two stages halve the feature map; the last two keep its
        # resolution so the output stays wide enough for text recognition.
        stride_list = [(2, 2), (2, 2), (1, 1), (1, 1)]
        num_filters = [64, 128, 256, 512]
        self.depth = supported_layers[layers]['depth']
        self.F = []
        self.conv = ConvBNLayer(
            in_channels=in_channels,
            out_channels=64,
            kernel_size=7,
            stride=2,
            act="relu",
            name="conv1")
        self.block_list = []
        in_ch = 64
        if layers >= 50:
            # Deeper variants (50/101/152) stack BottleneckBlocks.
            for block in range(len(self.depth)):
                for i in range(self.depth[block]):
                    if layers in [101, 152] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)
                    block_list = self.add_sublayer(
                        "bottleneckBlock_{}_{}".format(block, i),
                        BottleneckBlock(
                            in_channels=in_ch,
                            out_channels=num_filters[block],
                            stride=stride_list[block] if i == 0 else 1,
                            name=conv_name))
                    in_ch = num_filters[block] * 4
                    self.block_list.append(block_list)
                    self.F.append(block_list)
        else:
            # Shallower variants (18/34) stack BasicBlocks.
            for block in range(len(self.depth)):
                for i in range(self.depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    if i == 0 and block != 0:
                        stride = (2, 1)
                    else:
                        stride = (1, 1)
                    basic_block = self.add_sublayer(
                        conv_name,
                        BasicBlock(
                            in_channels=in_ch,
                            out_channels=num_filters[block],
                            stride=stride_list[block] if i == 0 else 1,
                            is_first=block == i == 0,
                            name=conv_name))
                    in_ch = basic_block.out_channels
                    self.block_list.append(basic_block)
        # Top-down FPN merge: the outputs of the last three stages are fused
        # (concat -> 1x1 conv -> 3x3 conv -> BN) and projected to 512 channels.
        out_ch_list = [in_ch // 4, in_ch // 2, in_ch]
        self.base_block = []
        self.conv_trans = []
        self.bn_block = []
        for i in [-2, -3]:
            in_channels = out_ch_list[i + 1] + out_ch_list[i]
            self.base_block.append(
                self.add_sublayer(
                    "F_{}_base_block_0".format(i),
                    nn.Conv2D(
                        in_channels=in_channels,
                        out_channels=out_ch_list[i],
                        kernel_size=1,
                        weight_attr=ParamAttr(trainable=True),
                        bias_attr=ParamAttr(trainable=True))))
            self.base_block.append(
                self.add_sublayer(
                    "F_{}_base_block_1".format(i),
                    nn.Conv2D(
                        in_channels=out_ch_list[i],
                        out_channels=out_ch_list[i],
                        kernel_size=3,
                        padding=1,
                        weight_attr=ParamAttr(trainable=True),
                        bias_attr=ParamAttr(trainable=True))))
            self.base_block.append(
                self.add_sublayer(
                    "F_{}_base_block_2".format(i),
                    nn.BatchNorm(
                        num_channels=out_ch_list[i],
                        act="relu",
                        param_attr=ParamAttr(trainable=True),
                        bias_attr=ParamAttr(trainable=True))))
        self.base_block.append(
            self.add_sublayer(
                "F_{}_base_block_3".format(i),
                nn.Conv2D(
                    in_channels=out_ch_list[i],
                    out_channels=512,
                    kernel_size=1,
                    bias_attr=ParamAttr(trainable=True),
                    weight_attr=ParamAttr(trainable=True))))
        self.out_channels = 512

    def __call__(self, x):
        x = self.conv(x)
        fpn_list = []
        F = []
        # fpn_list holds cumulative block counts, i.e. the (1-based) indices of
        # the last block of each stage, whose outputs feed the FPN merge.
        for i in range(len(self.depth)):
            fpn_list.append(np.sum(self.depth[:i + 1]))
        for i, block in enumerate(self.block_list):
            x = block(x)
            for number in fpn_list:
                if i + 1 == number:
                    F.append(x)
        base = F[-1]
        j = 0
        for i, block in enumerate(self.base_block):
            if i % 3 == 0 and i < 6:
                # Every third block starts a new merge step: concatenate the
                # next shallower stage output with the running "base" feature.
                j = j + 1
                b, c, w, h = F[-j - 1].shape
                if [w, h] == list(base.shape[2:]):
                    base = base
                else:
                    base = self.conv_trans[j - 1](base)
                    base = self.bn_block[j - 1](base)
                base = paddle.concat([base, F[-j - 1]], axis=1)
            base = block(base)
        return base


class ConvBNLayer(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        # When stride is the tuple (1, 1), a 2x2 kernel with dilation 2
        # replaces the requested kernel: the effective receptive field stays
        # 3x3 while no extra downsampling is introduced.
        self.conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=2 if stride == (1, 1) else kernel_size,
            dilation=2 if stride == (1, 1) else 1,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + '.conv2d.output.1.w_0'),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name=name + '.output.1.w_0'),
            bias_attr=ParamAttr(name=name + '.output.1.b_0'),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance")

    def __call__(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return x


class ShortCut(nn.Layer):
    def __init__(self, in_channels, out_channels, stride, name, is_first=False):
        super(ShortCut, self).__init__()
        self.use_conv = True
        if in_channels != out_channels or stride != 1 or is_first == True:
            if stride == (1, 1):
                self.conv = ConvBNLayer(
                    in_channels, out_channels, 1, 1, name=name)
            else:  # stride == (2, 2)
                self.conv = ConvBNLayer(
                    in_channels, out_channels, 1, stride, name=name)
        else:
            self.use_conv = False

    def forward(self, x):
        if self.use_conv:
            x = self.conv(x)
        return x


class BottleneckBlock(nn.Layer):
    def __init__(self, in_channels, out_channels, stride, name):
        super(BottleneckBlock, self).__init__()
        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            act='relu',
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        self.conv2 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels * 4,
            kernel_size=1,
            act=None,
            name=name + "_branch2c")
        self.short = ShortCut(
            in_channels=in_channels,
            out_channels=out_channels * 4,
            stride=stride,
            is_first=False,
            name=name + "_branch1")
        self.out_channels = out_channels * 4

    def forward(self, x):
        y = self.conv0(x)
        y = self.conv1(y)
        y = self.conv2(y)
        y = y + self.short(x)
        y = F.relu(y)
        return y


class BasicBlock(nn.Layer):
    def __init__(self, in_channels, out_channels, stride, name, is_first):
        super(BasicBlock, self).__init__()
        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            act='relu',
            stride=stride,
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            act=None,
            name=name + "_branch2b")
        self.short = ShortCut(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            is_first=is_first,
            name=name + "_branch1")
        self.out_channels = out_channels

    def forward(self, x):
        y = self.conv0(x)
        y = self.conv1(y)
        y = y + self.short(x)
        return F.relu(y)
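

# --- Usage sketch (not part of the original file) ---------------------------
# A minimal smoke test of the backbone: build the default ResNet50-FPN and run
# a dummy single-channel image through it. The 1 x 64 x 256 input shape is an
# assumption chosen so every stride divides evenly; any height/width divisible
# by 8 behaves the same. The output should have self.out_channels == 512
# channels at 1/8 of the input resolution, i.e. [1, 512, 8, 32] for this input.
if __name__ == "__main__":
    model = ResNetFPN(in_channels=1, layers=50)
    dummy = paddle.rand([1, 1, 64, 256], dtype="float32")
    feat = model(dummy)
    print(feat.shape)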