# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import functools
import numpy as np
import paddle.nn as nn
import paddle.nn.functional as F

from ...modules.nn import Spectralnorm
from ...modules.norm import build_norm_layer

from .builder import DISCRIMINATORS


@DISCRIMINATORS.register()
class NLayerDiscriminator(nn.Layer):
    """Defines a PatchGAN discriminator"""
    def __init__(self,
                 input_nc,
                 ndf=64,
                 n_layers=3,
                 norm_type='instance',
                 use_sigmoid=False):
        """Construct a PatchGAN discriminator

        Parameters:
            input_nc (int)       -- the number of channels in input images
            ndf (int)            -- the number of filters in the first conv layer
            n_layers (int)       -- the number of conv layers in the discriminator
            norm_type (str)      -- normalization layer type
            use_sigmoid (bool)   -- whether to apply a sigmoid to the final prediction map
        """
        super(NLayerDiscriminator, self).__init__()
        norm_layer = build_norm_layer(norm_type)
        if type(norm_layer) == functools.partial:
            # no need to use bias as BatchNorm2D has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2D
        else:
            use_bias = norm_layer == nn.InstanceNorm2D

        kw = 4
        padw = 1

        if norm_type == 'spectral':
            sequence = [
                Spectralnorm(
                    nn.Conv2D(input_nc,
                              ndf,
                              kernel_size=kw,
                              stride=2,
                              padding=padw)),
                nn.LeakyReLU(0.01)
            ]
        else:
            sequence = [
                nn.Conv2D(input_nc,
                          ndf,
                          kernel_size=kw,
                          stride=2,
                          padding=padw,
                          bias_attr=use_bias),
                nn.LeakyReLU(0.2)
            ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            if norm_type == 'spectral':
                sequence += [
                    Spectralnorm(
                        nn.Conv2D(ndf * nf_mult_prev,
                                  ndf * nf_mult,
                                  kernel_size=kw,
                                  stride=2,
                                  padding=padw)),
                    nn.LeakyReLU(0.01)
                ]
            else:
                sequence += [
                    nn.Conv2D(ndf * nf_mult_prev,
                              ndf * nf_mult,
                              kernel_size=kw,
                              stride=2,
                              padding=padw,
                              bias_attr=use_bias),
                    norm_layer(ndf * nf_mult),
                    nn.LeakyReLU(0.2)
                ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        if norm_type == 'spectral':
            sequence += [
                Spectralnorm(
                    nn.Conv2D(ndf * nf_mult_prev,
                              ndf * nf_mult,
                              kernel_size=kw,
                              stride=1,
                              padding=padw)),
                nn.LeakyReLU(0.01)
            ]
        else:
            sequence += [
                nn.Conv2D(ndf * nf_mult_prev,
                          ndf * nf_mult,
                          kernel_size=kw,
                          stride=1,
                          padding=padw,
                          bias_attr=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2)
            ]

        if norm_type == 'spectral':
            sequence += [
                Spectralnorm(
                    nn.Conv2D(ndf * nf_mult,
                              1,
                              kernel_size=kw,
                              stride=1,
                              padding=padw,
                              bias_attr=False))
            ]  # output 1 channel prediction map
        else:
            sequence += [
                nn.Conv2D(ndf * nf_mult,
                          1,
                          kernel_size=kw,
                          stride=1,
                          padding=padw)
            ]  # output 1 channel prediction map

        self.model = nn.Sequential(*sequence)
        self.final_act = F.sigmoid if use_sigmoid else (lambda x: x)

    def forward(self, input):
        """Standard forward."""
        return self.final_act(self.model(input))


@DISCRIMINATORS.register()
class NLayerDiscriminatorWithClassification(NLayerDiscriminator):
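    """NLayerDiscriminator conditioned on a class label.

    The integer class id is converted to a one-hot map, tiled to the input's
    spatial size, and concatenated with the image channels before the PatchGAN
    backbone scores the result.
    """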
    def __init__(self, input_nc, n_class=10, **kwargs):
        # reserve extra input channels for the one-hot class map
        input_nc = input_nc + n_class
        super(NLayerDiscriminatorWithClassification,
              self).__init__(input_nc, **kwargs)

        self.n_class = n_class
    
    def forward(self, x, class_id):
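        """Score ``x`` conditioned on ``class_id`` (integer labels, one per sample)."""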
        if self.n_class > 0:
            class_id = (class_id % self.n_class).detach()
            class_id = F.one_hot(class_id, self.n_class).astype('float32')
            class_id = class_id.reshape([x.shape[0], -1, 1, 1])
            class_id = class_id.tile([1,1,*x.shape[2:]])
            x = paddle.concat([x, class_id], 1)
        
        return super(NLayerDiscriminatorWithClassification, self).forward(x)
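
# Usage sketch (illustrative only, not part of the module). It assumes this
# file lives at ppgan/models/discriminators/nlayers.py; with the defaults, a
# 256x256 input yields a 30x30 map of per-patch logits:
#
#     import paddle
#     from ppgan.models.discriminators.nlayers import (
#         NLayerDiscriminator, NLayerDiscriminatorWithClassification)
#
#     disc = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
#     x = paddle.rand([4, 3, 256, 256])
#     out = disc(x)  # shape [4, 1, 30, 30]
#
#     cond = NLayerDiscriminatorWithClassification(input_nc=3, n_class=10)
#     labels = paddle.randint(0, 10, [4])
#     out = cond(x, labels)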