#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from ...modules.init import kaiming_normal_, constant_
from .builder import GENERATORS


##########################################################################
def conv(in_channels, out_channels, kernel_size, bias_attr=False, stride=1):
    """Build a 2D convolution with 'same' padding for odd kernel sizes."""
    same_padding = kernel_size // 2
    return nn.Conv2D(in_channels,
                     out_channels,
                     kernel_size,
                     padding=same_padding,
                     bias_attr=bias_attr,
                     stride=stride)


##########################################################################
## Channel Attention Layer
class CALayer(nn.Layer):
    """Channel Attention (CA) layer.

    Squeezes spatial information via global average pooling, learns
    per-channel gating weights through a two-layer bottleneck, and rescales
    the input feature map channel-wise.
    """

    def __init__(self, channel, reduction=16, bias_attr=False):
        super(CALayer, self).__init__()
        # Global average pooling: each feature map -> a single value.
        self.avg_pool = nn.AdaptiveAvgPool2D(1)
        # Bottleneck (channel -> channel/reduction -> channel) ending in a
        # sigmoid, producing per-channel weights in (0, 1).
        self.conv_du = nn.Sequential(
            nn.Conv2D(channel, channel // reduction, 1, padding=0,
                      bias_attr=bias_attr),
            nn.ReLU(),
            nn.Conv2D(channel // reduction, channel, 1, padding=0,
                      bias_attr=bias_attr),
            nn.Sigmoid())

    def forward(self, x):
        weights = self.conv_du(self.avg_pool(x))
        return x * weights


##########################################################################
## Channel Attention Block (CAB)
class CAB(nn.Layer):
    """Channel Attention Block: conv-act-conv followed by channel attention,
    wrapped in a residual connection."""

    def __init__(self, n_feat, kernel_size, reduction, bias_attr, act):
        super(CAB, self).__init__()
        # NOTE: registration order (CA before body) matches the original so
        # parameter ordering is unchanged.
        self.CA = CALayer(n_feat, reduction, bias_attr=bias_attr)
        self.body = nn.Sequential(
            conv(n_feat, n_feat, kernel_size, bias_attr=bias_attr),
            act,
            conv(n_feat, n_feat, kernel_size, bias_attr=bias_attr))

    def forward(self, x):
        res = self.CA(self.body(x))
        res += x
        return res


##########################################################################
##---------- Resizing Modules ----------
class DownSample(nn.Layer):
    """Halve spatial resolution (bilinear) and grow channels by s_factor."""

    def __init__(self, in_channels, s_factor):
        super(DownSample, self).__init__()
        self.down = nn.Sequential(
            nn.Upsample(scale_factor=0.5, mode='bilinear',
                        align_corners=False),
            nn.Conv2D(in_channels, in_channels + s_factor, 1, stride=1,
                      padding=0, bias_attr=False))

    def forward(self, x):
        return self.down(x)

class UpSample(nn.Layer):
    """Double spatial resolution (bilinear) and shrink channels by s_factor."""

    def __init__(self, in_channels, s_factor):
        super(UpSample, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2D(in_channels + s_factor, in_channels, 1, stride=1,
                      padding=0, bias_attr=False))

    def forward(self, x):
        return self.up(x)


class SkipUpSample(nn.Layer):
    """UpSample variant that adds a skip-connection tensor after upsampling."""

    def __init__(self, in_channels, s_factor):
        super(SkipUpSample, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2D(in_channels + s_factor, in_channels, 1, stride=1,
                      padding=0, bias_attr=False))

    def forward(self, x, y):
        return self.up(x) + y

##########################################################################
## U-Net
class Encoder(nn.Layer):
    """Three-level U-Net encoder built from CAB blocks.

    When ``csff`` is True, 1x1 convolutions are created so encoder and
    decoder features from a previous stage can be fused into each level
    (Cross Stage Feature Fusion).
    """

    def __init__(self, n_feat, kernel_size, reduction, act, bias_attr,
                 scale_unetfeats, csff):
        super(Encoder, self).__init__()

        def make_level(channels):
            # Each encoder level is two CABs at that level's channel width.
            return nn.Sequential(*[
                CAB(channels, kernel_size, reduction, bias_attr=bias_attr,
                    act=act) for _ in range(2)
            ])

        self.encoder_level1 = make_level(n_feat)
        self.encoder_level2 = make_level(n_feat + scale_unetfeats)
        self.encoder_level3 = make_level(n_feat + (scale_unetfeats * 2))

        self.down12 = DownSample(n_feat, scale_unetfeats)
        self.down23 = DownSample(n_feat + scale_unetfeats, scale_unetfeats)

        # Cross Stage Feature Fusion (CSFF): 1x1 projections per level.
        # Attribute names (csff_enc1..3 / csff_dec1..3) match the original
        # so state_dict keys are unchanged.
        if csff:
            widths = [
                n_feat, n_feat + scale_unetfeats,
                n_feat + (scale_unetfeats * 2)
            ]
            for i, w in enumerate(widths, start=1):
                setattr(self, 'csff_enc%d' % i,
                        nn.Conv2D(w, w, kernel_size=1, bias_attr=bias_attr))
            for i, w in enumerate(widths, start=1):
                setattr(self, 'csff_dec%d' % i,
                        nn.Conv2D(w, w, kernel_size=1, bias_attr=bias_attr))

    def forward(self, x, encoder_outs=None, decoder_outs=None):
        # Fuse previous-stage features only when both sets are supplied.
        fuse = (encoder_outs is not None) and (decoder_outs is not None)

        enc1 = self.encoder_level1(x)
        if fuse:
            enc1 = enc1 + self.csff_enc1(encoder_outs[0]) + self.csff_dec1(
                decoder_outs[0])

        enc2 = self.encoder_level2(self.down12(enc1))
        if fuse:
            enc2 = enc2 + self.csff_enc2(encoder_outs[1]) + self.csff_dec2(
                decoder_outs[1])

        enc3 = self.encoder_level3(self.down23(enc2))
        if fuse:
            enc3 = enc3 + self.csff_enc3(encoder_outs[2]) + self.csff_dec3(
                decoder_outs[2])

        return [enc1, enc2, enc3]


class Decoder(nn.Layer):
    """Three-level U-Net decoder mirroring ``Encoder``.

    Skip connections from the encoder pass through a CAB (``skip_attn*``)
    before being added inside ``SkipUpSample``.
    """

    def __init__(self, n_feat, kernel_size, reduction, act, bias_attr,
                 scale_unetfeats):
        super(Decoder, self).__init__()

        def make_level(channels):
            # Each decoder level is two CABs at that level's channel width.
            return nn.Sequential(*[
                CAB(channels, kernel_size, reduction, bias_attr=bias_attr,
                    act=act) for _ in range(2)
            ])

        self.decoder_level1 = make_level(n_feat)
        self.decoder_level2 = make_level(n_feat + scale_unetfeats)
        self.decoder_level3 = make_level(n_feat + (scale_unetfeats * 2))

        self.skip_attn1 = CAB(n_feat, kernel_size, reduction,
                              bias_attr=bias_attr, act=act)
        self.skip_attn2 = CAB(n_feat + scale_unetfeats, kernel_size,
                              reduction, bias_attr=bias_attr, act=act)

        self.up21 = SkipUpSample(n_feat, scale_unetfeats)
        self.up32 = SkipUpSample(n_feat + scale_unetfeats, scale_unetfeats)

    def forward(self, outs):
        enc1, enc2, enc3 = outs

        # Decode bottom-up, merging attended encoder skips at each step.
        dec3 = self.decoder_level3(enc3)
        dec2 = self.decoder_level2(self.up32(dec3, self.skip_attn2(enc2)))
        dec1 = self.decoder_level1(self.up21(dec2, self.skip_attn1(enc1)))

        return [dec1, dec2, dec3]


##########################################################################
## Original Resolution Block (ORB)
class ORB(nn.Layer):
    """Original Resolution Block (ORB): ``num_cab`` CABs plus a trailing
    conv, wrapped in a residual connection.

    Args:
        n_feat (int): Feature channel width.
        kernel_size (int): Convolution kernel size.
        reduction (int): Channel-attention reduction ratio.
        act (nn.Layer): Activation layer shared by the CABs.
        bias_attr (bool): Bias setting for the convolutions.
        num_cab (int): Number of CAB blocks in the body.
    """

    def __init__(self, n_feat, kernel_size, reduction, act, bias_attr, num_cab):
        super(ORB, self).__init__()
        # Fixed: the original initialised ``modules_body = []`` and then
        # immediately rebound it with this comprehension; the dead
        # assignment is removed.
        modules_body = [
            CAB(n_feat, kernel_size, reduction, bias_attr=bias_attr, act=act)
            for _ in range(num_cab)
        ]
        modules_body.append(conv(n_feat, n_feat, kernel_size))
        self.body = nn.Sequential(*modules_body)

    def forward(self, x):
        res = self.body(x)
        res += x
        return res

##########################################################################
class ORSNet(nn.Layer):
    """Original Resolution Subnetwork (Stage 3).

    Runs three ORBs at full resolution; after each ORB, encoder and decoder
    features from the previous stage (upsampled to full resolution where
    needed) are projected by 1x1 convs and added in.
    """

    def __init__(self, n_feat, scale_orsnetfeats, kernel_size, reduction, act,
                 bias_attr, scale_unetfeats, num_cab):
        super(ORSNet, self).__init__()

        orb_feats = n_feat + scale_orsnetfeats
        self.orb1 = ORB(orb_feats, kernel_size, reduction, act, bias_attr,
                        num_cab)
        self.orb2 = ORB(orb_feats, kernel_size, reduction, act, bias_attr,
                        num_cab)
        self.orb3 = ORB(orb_feats, kernel_size, reduction, act, bias_attr,
                        num_cab)

        # Bring level-2 (one UpSample) and level-3 (two chained UpSamples)
        # U-Net features back to the original resolution.
        self.up_enc1 = UpSample(n_feat, scale_unetfeats)
        self.up_dec1 = UpSample(n_feat, scale_unetfeats)
        self.up_enc2 = nn.Sequential(
            UpSample(n_feat + scale_unetfeats, scale_unetfeats),
            UpSample(n_feat, scale_unetfeats))
        self.up_dec2 = nn.Sequential(
            UpSample(n_feat + scale_unetfeats, scale_unetfeats),
            UpSample(n_feat, scale_unetfeats))

        # 1x1 projections from n_feat channels to the ORB width.
        self.conv_enc1 = nn.Conv2D(n_feat, orb_feats, kernel_size=1,
                                   bias_attr=bias_attr)
        self.conv_enc2 = nn.Conv2D(n_feat, orb_feats, kernel_size=1,
                                   bias_attr=bias_attr)
        self.conv_enc3 = nn.Conv2D(n_feat, orb_feats, kernel_size=1,
                                   bias_attr=bias_attr)

        self.conv_dec1 = nn.Conv2D(n_feat, orb_feats, kernel_size=1,
                                   bias_attr=bias_attr)
        self.conv_dec2 = nn.Conv2D(n_feat, orb_feats, kernel_size=1,
                                   bias_attr=bias_attr)
        self.conv_dec3 = nn.Conv2D(n_feat, orb_feats, kernel_size=1,
                                   bias_attr=bias_attr)

    def forward(self, x, encoder_outs, decoder_outs):
        x = self.orb1(x)
        x = x + self.conv_enc1(encoder_outs[0]) + self.conv_dec1(
            decoder_outs[0])

        x = self.orb2(x)
        x = x + self.conv_enc2(self.up_enc1(encoder_outs[1])) + self.conv_dec2(
            self.up_dec1(decoder_outs[1]))

        x = self.orb3(x)
        x = x + self.conv_enc3(self.up_enc2(encoder_outs[2])) + self.conv_dec3(
            self.up_dec2(decoder_outs[2]))

        return x


##########################################################################
## Supervised Attention Module
class SAM(nn.Layer):
    """Supervised Attention Module.

    Produces the current stage's restored image (``img``) and attention-
    gated features that are passed on to the next stage.
    """

    def __init__(self, n_feat, kernel_size, bias_attr):
        super(SAM, self).__init__()
        self.conv1 = conv(n_feat, n_feat, kernel_size, bias_attr=bias_attr)
        self.conv2 = conv(n_feat, 3, kernel_size, bias_attr=bias_attr)
        self.conv3 = conv(3, n_feat, kernel_size, bias_attr=bias_attr)

    def forward(self, x, x_img):
        feats = self.conv1(x)
        # Stage output image: predicted residual plus the input image.
        img = self.conv2(x) + x_img
        # Per-pixel attention mask derived from the restored image.
        mask = F.sigmoid(self.conv3(img))
        feats = feats * mask + x
        return feats, img


@GENERATORS.register()
class MPRNet(nn.Layer):
    """Multi-Stage Progressive Image Restoration Network (MPRNet).

    Stage 1 restores four non-overlapping quarter patches, Stage 2 restores
    the top/bottom half patches, and Stage 3 restores the full-resolution
    image; SAM features and encoder/decoder features are propagated from
    each stage to the next.

    Args:
        in_c (int): Input image channels.
        out_c (int): Output image channels.
        n_feat (int): Base feature width.
        scale_unetfeats (int): Channel increment per U-Net depth level.
        scale_orsnetfeats (int): Extra channel width used in ORSNet.
        num_cab (int): Number of CABs per ORB.
        kernel_size (int): Convolution kernel size.
        reduction (int): Channel-attention reduction ratio.
        bias_attr (bool): Bias setting forwarded to all convolutions.
    """

    def __init__(self,
                 in_c=3,
                 out_c=3,
                 n_feat=96,
                 scale_unetfeats=48,
                 scale_orsnetfeats=32,
                 num_cab=8,
                 kernel_size=3,
                 reduction=4,
                 bias_attr=False):
        super(MPRNet, self).__init__()
        act = nn.PReLU()
        # One shallow feature extractor (conv + CAB) per stage.
        self.shallow_feat1 = nn.Sequential(
            conv(in_c, n_feat, kernel_size, bias_attr=bias_attr),
            CAB(n_feat, kernel_size, reduction, bias_attr=bias_attr, act=act))
        self.shallow_feat2 = nn.Sequential(
            conv(in_c, n_feat, kernel_size, bias_attr=bias_attr),
            CAB(n_feat, kernel_size, reduction, bias_attr=bias_attr, act=act))
        self.shallow_feat3 = nn.Sequential(
            conv(in_c, n_feat, kernel_size, bias_attr=bias_attr),
            CAB(n_feat, kernel_size, reduction, bias_attr=bias_attr, act=act))

        # Stage-2 encoder enables Cross Stage Feature Fusion (csff=True) to
        # take in stage-1 encoder/decoder features; stage 1 has nothing to
        # fuse.
        self.stage1_encoder = Encoder(n_feat,
                                      kernel_size,
                                      reduction,
                                      act,
                                      bias_attr,
                                      scale_unetfeats,
                                      csff=False)
        self.stage1_decoder = Decoder(n_feat, kernel_size, reduction, act,
                                      bias_attr, scale_unetfeats)

        self.stage2_encoder = Encoder(n_feat,
                                      kernel_size,
                                      reduction,
                                      act,
                                      bias_attr,
                                      scale_unetfeats,
                                      csff=True)
        self.stage2_decoder = Decoder(n_feat, kernel_size, reduction, act,
                                      bias_attr, scale_unetfeats)

        self.stage3_orsnet = ORSNet(n_feat, scale_orsnetfeats, kernel_size,
                                    reduction, act, bias_attr, scale_unetfeats,
                                    num_cab)

        # Supervised attention between consecutive stages.
        self.sam12 = SAM(n_feat, kernel_size=1, bias_attr=bias_attr)
        self.sam23 = SAM(n_feat, kernel_size=1, bias_attr=bias_attr)

        # Convs that fuse concatenated cross-stage features, plus the
        # output head.
        self.concat12 = conv(n_feat * 2,
                             n_feat,
                             kernel_size,
                             bias_attr=bias_attr)
        self.concat23 = conv(n_feat * 2,
                             n_feat + scale_orsnetfeats,
                             kernel_size,
                             bias_attr=bias_attr)
        self.tail = conv(n_feat + scale_orsnetfeats,
                         out_c,
                         kernel_size,
                         bias_attr=bias_attr)

    def forward(self, x3_img):
        """Run all three restoration stages on a full-resolution image.

        Args:
            x3_img (Tensor): Input image batch; indexed as (N, C, H, W)
                below.

        Returns:
            list[Tensor]: ``[stage3, stage2, stage1]`` restored images; the
            stage-3 entry (input + predicted residual) is the final output.
        """
        # Original-resolution image dimensions for Stage 3.
        H = x3_img.shape[2]
        W = x3_img.shape[3]

        # Multi-Patch Hierarchy: split the image into non-overlapping
        # patches.

        # Two half patches (top/bottom) for Stage 2.  Fixed: use exact
        # integer floor division instead of the original ``int(H / 2)``,
        # which went through float division.
        x2top_img = x3_img[:, :, 0:H // 2, :]
        x2bot_img = x3_img[:, :, H // 2:H, :]

        # Four quarter patches for Stage 1.
        x1ltop_img = x2top_img[:, :, :, 0:W // 2]
        x1rtop_img = x2top_img[:, :, :, W // 2:W]
        x1lbot_img = x2bot_img[:, :, :, 0:W // 2]
        x1rbot_img = x2bot_img[:, :, :, W // 2:W]

        ##-------------------------------------------
        ##-------------- Stage 1---------------------
        ##-------------------------------------------
        ## Compute Shallow Features
        x1ltop = self.shallow_feat1(x1ltop_img)
        x1rtop = self.shallow_feat1(x1rtop_img)
        x1lbot = self.shallow_feat1(x1lbot_img)
        x1rbot = self.shallow_feat1(x1rbot_img)

        ## Process features of all 4 patches with Encoder of Stage 1
        feat1_ltop = self.stage1_encoder(x1ltop)
        feat1_rtop = self.stage1_encoder(x1rtop)
        feat1_lbot = self.stage1_encoder(x1lbot)
        feat1_rbot = self.stage1_encoder(x1rbot)

        ## Concat deep features; left/right halves joined along width (axis 3)
        feat1_top = [
            paddle.concat((k, v), 3) for k, v in zip(feat1_ltop, feat1_rtop)
        ]
        feat1_bot = [
            paddle.concat((k, v), 3) for k, v in zip(feat1_lbot, feat1_rbot)
        ]

        ## Pass features through Decoder of Stage 1
        res1_top = self.stage1_decoder(feat1_top)
        res1_bot = self.stage1_decoder(feat1_bot)

        ## Apply Supervised Attention Module (SAM)
        x2top_samfeats, stage1_img_top = self.sam12(res1_top[0], x2top_img)
        x2bot_samfeats, stage1_img_bot = self.sam12(res1_bot[0], x2bot_img)

        ## Output image at Stage 1: halves re-joined along height (axis 2)
        stage1_img = paddle.concat([stage1_img_top, stage1_img_bot], 2)

        ##-------------------------------------------
        ##-------------- Stage 2---------------------
        ##-------------------------------------------
        ## Compute Shallow Features
        x2top = self.shallow_feat2(x2top_img)
        x2bot = self.shallow_feat2(x2bot_img)

        ## Concatenate SAM features of Stage 1 with shallow features of Stage 2
        x2top_cat = self.concat12(paddle.concat([x2top, x2top_samfeats], 1))
        x2bot_cat = self.concat12(paddle.concat([x2bot, x2bot_samfeats], 1))

        ## Process features of both patches with Encoder of Stage 2 (CSFF on)
        feat2_top = self.stage2_encoder(x2top_cat, feat1_top, res1_top)
        feat2_bot = self.stage2_encoder(x2bot_cat, feat1_bot, res1_bot)

        ## Concat deep features along height (axis 2)
        feat2 = [paddle.concat((k, v), 2) for k, v in zip(feat2_top, feat2_bot)]

        ## Pass features through Decoder of Stage 2
        res2 = self.stage2_decoder(feat2)

        ## Apply SAM
        x3_samfeats, stage2_img = self.sam23(res2[0], x3_img)

        ##-------------------------------------------
        ##-------------- Stage 3---------------------
        ##-------------------------------------------
        ## Compute Shallow Features
        x3 = self.shallow_feat3(x3_img)

        ## Concatenate SAM features of Stage 2 with shallow features of Stage 3
        x3_cat = self.concat23(paddle.concat([x3, x3_samfeats], 1))

        x3_cat = self.stage3_orsnet(x3_cat, feat2, res2)

        ## Stage 3 predicts a residual that is added to the input image.
        stage3_img = self.tail(x3_cat)

        return [stage3_img + x3_img, stage2_img, stage1_img]