Unverified · Commit 145d1556 · Authored by: Wenyu · Committed by: GitHub

add layer_norm for convnext outputs (#6201)

Parent 941bbf7c
@@ -141,6 +141,7 @@ class ConvNeXt(nn.Layer):
                  layer_scale_init_value=1e-6,
                  head_init_scale=1.,
                  return_idx=[1, 2, 3],
+                 norm_output=True,
                  pretrained=None, ):
         super().__init__()
@@ -178,6 +179,14 @@ class ConvNeXt(nn.Layer):
         self.return_idx = return_idx
         self.dims = [dims[i] for i in return_idx]  # [::-1]
+        self.norm_output = norm_output
+        if norm_output:
+            self.norms = nn.LayerList([
+                LayerNorm(
+                    c, eps=1e-6, data_format="channels_first")
+                for c in self.dims
+            ])
+
         self.apply(self._init_weights)
         # self.head.weight.set_value(self.head.weight.numpy() * head_init_scale)
         # self.head.bias.set_value(self.head.weight.numpy() * head_init_scale)
@@ -202,9 +211,11 @@
             x = self.stages[i](x)
             output.append(x)

-        output = [output[i] for i in self.return_idx]
+        outputs = [output[i] for i in self.return_idx]
+        if self.norm_output:
+            outputs = [self.norms[i](out) for i, out in enumerate(outputs)]

-        return output
+        return outputs

     def forward(self, x):
         x = self.forward_features(x['image'])
...
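The diff relies on a `LayerNorm` helper that accepts `data_format="channels_first"`, so it can normalize NCHW feature maps directly; the stock `paddle.nn.LayerNorm` only normalizes over trailing dimensions. Below is a minimal Paddle sketch of what such a helper typically looks like in ConvNeXt implementations — the class body and the demo shapes are assumptions for illustration, not part of this commit:

```python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class LayerNorm(nn.Layer):
    """LayerNorm that also supports NCHW ("channels_first") inputs.

    A sketch of the helper the diff calls as
    LayerNorm(c, eps=1e-6, data_format="channels_first").
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        # Learnable per-channel scale and shift.
        self.weight = self.create_parameter(
            shape=[normalized_shape],
            default_initializer=nn.initializer.Constant(1.0))
        self.bias = self.create_parameter(
            shape=[normalized_shape],
            default_initializer=nn.initializer.Constant(0.0))
        self.eps = eps
        self.data_format = data_format
        self.normalized_shape = [normalized_shape]

    def forward(self, x):
        if self.data_format == "channels_last":
            # Stock layer norm over the trailing (channel) dimension.
            return F.layer_norm(x, self.normalized_shape, self.weight,
                                self.bias, self.eps)
        # channels_first: normalize each NCHW position over its channel axis.
        u = x.mean(axis=1, keepdim=True)
        s = ((x - u) ** 2).mean(axis=1, keepdim=True)
        x = (x - u) / paddle.sqrt(s + self.eps)
        return self.weight[:, None, None] * x + self.bias[:, None, None]


# Quick shape check on a dummy NCHW feature map (sizes are illustrative).
feat = paddle.randn([2, 96, 56, 56])
ln = LayerNorm(96, eps=1e-6, data_format="channels_first")
print(ln(feat).shape)  # [2, 96, 56, 56]
```

With `norm_output=True` (the new default), each feature map selected by `return_idx` is passed through its per-stage `LayerNorm` before `forward_features` returns it.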