Unverified commit 948bc8b7, authored by LielinJiang, committed by GitHub

Add apply for Layer (#25812)

* add apply for Layer
Parent 3dd2e380
@@ -129,6 +129,45 @@ class Layer(core.Layer):
            for layer in self.sublayers():
                layer.eval()
    def apply(self, fn):
        """
        Applies ``fn`` recursively to every sublayer (as returned by ``.sublayers()``)
        as well as self. Typical use includes initializing the parameters of a model.

        Parameters:
            fn (function): a function to be applied to each sublayer

        Returns:
            Layer: self

        Example::
            .. code-block:: python

              import paddle
              import paddle.nn as nn

              paddle.enable_imperative()

              net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

              def init_weights(layer):
                  if type(layer) == nn.Linear:
                      print('before init weight:', layer.weight.numpy())
                      new_weight = paddle.fill_constant(layer.weight.shape, layer.weight.dtype, value=0.9)
                      layer.weight.set_value(new_weight)
                      print('after init weight:', layer.weight.numpy())

              net.apply(init_weights)

              print(net.state_dict())
        """
        for layer in self.sublayers():
            layer.apply(fn)

        fn(self)

        return self
    def full_name(self):
        """Full name for this layer, composed by name_scope + "/" + MyLayer.__class__.__name__
...
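Beyond initializing parameters, ``apply`` can also be used for read-only traversal of a model, since it visits every sublayer and then the layer itself, and returns ``self``. A minimal sketch using the same imperative (dygraph) API as the docstring example above; the ``count_linear`` helper and ``linear_layers`` list are illustrative names, not part of this PR:

    import paddle
    import paddle.nn as nn

    paddle.enable_imperative()

    net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

    # Collect every Linear sublayer encountered during the traversal.
    linear_layers = []

    def count_linear(layer):
        if isinstance(layer, nn.Linear):
            linear_layers.append(layer)

    # apply() calls fn on each sublayer first, then on net itself, and returns net.
    net.apply(count_linear)
    print('number of Linear sublayers:', len(linear_layers))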
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.nn as nn
import paddle.fluid as fluid
import numpy as np
class LeNetDygraph(fluid.dygraph.Layer):
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2D(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Pool2D(2, 'max', 2),
            nn.Conv2D(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
            nn.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = nn.Sequential(
                nn.Linear(400, 120),
                nn.Linear(120, 84),
                nn.Linear(
                    84, 10, act=classifier_activation))

    def forward(self, inputs):
        x = self.features(inputs)

        if self.num_classes > 0:
            x = fluid.layers.flatten(x, 1)
            x = self.fc(x)
        return x


def init_weights(layer):
    if type(layer) == nn.Linear:
        new_weight = paddle.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.9)
        layer.weight.set_value(new_weight)
        new_bias = paddle.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.1)
        layer.bias.set_value(new_bias)
    elif type(layer) == nn.Conv2D:
        new_weight = paddle.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.7)
        layer.weight.set_value(new_weight)
        new_bias = paddle.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.2)
        layer.bias.set_value(new_bias)


class TestLayerApply(unittest.TestCase):
    def test_apply_init_weight(self):
        with fluid.dygraph.guard():
            net = LeNetDygraph()

            net.apply(init_weights)

            for layer in net.sublayers():
                if type(layer) == nn.Linear:
                    np.testing.assert_allclose(layer.weight.numpy(), 0.9)
                    np.testing.assert_allclose(layer.bias.numpy(), -0.1)
                elif type(layer) == nn.Conv2D:
                    np.testing.assert_allclose(layer.weight.numpy(), 0.7)
                    np.testing.assert_allclose(layer.bias.numpy(), -0.2)


if __name__ == '__main__':
    unittest.main()
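The new test can also be run on its own via unittest. A sketch of the invocation, assuming the file is saved as test_layer_apply.py (the actual file path is not shown in the diff above):

    python -m unittest test_layer_apply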