构建模型时使用了WeightNormParamAttr,当第二次调用到时,shape不一致
Created by: ManWingloeng
这个是我定义的判别器的部分代码。
def wn(name=None) :
if name is None:
name = get_parent_function_name()
return fluid.WeightNormParamAttr(dim=None, name=name,
initializer=fluid.initializer.ConstantInitializer(1.0))
def D(self, x, y_, name='D', is_test=False, reuse=False):
with fluid.unique_name.guard(name+'_'):
print("xshape: ",x.shape)
x = dropout(x, dropout_prob=0.2, is_test=False)
y = reshape(y_, [-1, self.y_dim, 1, 1]) #ten classes
x = conv_cond_concat(x, y)
#weight norm in paddlepaddle has finished
print("convbefore!!!!!!!:",x)
x = conv2d(x, num_filters=32, filter_size=3,
param_attr=wn(name=name+'_conv1_weight_norm_param'),
act='lrelu', reuse=reuse)
x = conv_cond_concat(x, y)
x = conv2d(x, num_filters=32, filter_size=3, stride=2,
param_attr=wn(name=name+'_conv2_weight_norm_param'),
act='lrelu', reuse=reuse)
print("D,con2d_2 shape:",x.shape)
x = dropout(x, dropout_prob=0.2)
x = conv_cond_concat(x, y)
x = conv2d(x, num_filters=64, filter_size=3, param_attr=wn(),
act='lrelu', reuse=reuse)
x = conv_cond_concat(x, y)
x = conv2d(x, num_filters=64, filter_size=3, stride=2, param_attr=wn(),
act='lrelu', reuse=reuse)
x = dropout(x, dropout_prob=0.2)
x = conv_cond_concat(x, y)
x = conv2d(x, num_filters=128, filter_size=3, param_attr=wn(),
act='lrelu', reuse=reuse)
x = conv_cond_concat(x, y)
x = conv2d(x, num_filters=128, filter_size=3, param_attr=wn(),
act='lrelu', reuse=reuse)
x = conv_cond_concat(x, y)
x = Global_Average_Pooling(x)
x = flatten(x)
x = concat(x, y_)
#IcGAN 每一层都要concat一下
x_logit = fc(x, 1)
out = sigmoid(x_logit)
return out, x_logit, x
调用D模型的program:
with fluid.program_guard(d_program):
# declare_data(self)
self.inputs = fluid.layers.data(shape=image_dims, name='real_images')
self.y = fluid.layers.data(shape=[self.y_dim], name='y')
self.z = fluid.layers.data(shape=[self.z_dim], name='z')
self.unlabelled_inputs = fluid.layers.data(shape=image_dims,
name='unlabelled_images')
print(self.inputs)
D_real, D_real_logits, _ = self.D(self.inputs, self.y, is_test=False)
G_train = self.G(self.z, self.y, is_test=False)
print(G_train)
D_fake, D_fake_logits, _ = self.D(G_train, self.y, is_test=False, reuse=True)
在实现TripleGAN的时候遇到这个问题,其中已经检查过inputs和G_train的shape是一致的,因为我需要对conv2d中的权重参数进行初始化,发现可以用WeightNormParamAttr,但是第当第二次调用即:D_fake, D_fake_logits, _ = self.D(G_train, self.y, is_test=False, reuse=True)
这里一直报与之前的权重w_g的shape不一致?