Commit 006c32f9 authored by Xin Pan

polish parameter names

Parameter names within a Layer instance should be unique.

test=develop
Parent e3dd6970
@@ -17,7 +17,7 @@ import contextlib
 import sys
 import numpy as np
 import collections
+from .. import unique_name
 from paddle.fluid import core
 from paddle.fluid import framework
 from paddle.fluid.imperative import base
@@ -26,14 +26,33 @@ __all__ = ['Layer', 'PyLayer']
 class Layer(core.Layer):
-    """Layers composed of operators."""
+    """Layers composed of operators.
+
+    Args:
+        name_scope: prefix name used by the layer to name parameters.
+            If prefix is "my_model/layer_1", a parameter name in MyLayer
+            can be "my_model/layer_1/MyLayer/w_n", where w is the parameter
+            base name and n is a unique, auto-generated suffix.
+        dtype: data type for the variables in the layer.
+    """

-    def __init__(self, dtype=core.VarDesc.VarType.FP32, name=None):
+    def __init__(self, name_scope, dtype=core.VarDesc.VarType.FP32):
+        self._full_name = unique_name.generate(name_scope + "/" +
+                                               self.__class__.__name__)
         self._built = False
         self._dtype = dtype
         self._parameters = collections.OrderedDict()
         self._sub_layers = collections.OrderedDict()

+    def full_name(self):
+        """Full name for this layer.
+
+        The full name is composed of name_scope + "/" + MyLayer.__class__.__name__.
+
+        Returns the full name of this layer.
+        """
+        return self._full_name
+
     def parameters(self, include_sublayers=True):
         """Returns a list of Parameters from current and sub-layers.
...
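With this change every imperative Layer is constructed with a name_scope and exposes full_name(), whose per-instance uniqueness comes from unique_name.generate. A minimal usage sketch, assuming the fluid.imperative API as of this commit (the numeric suffixes depend on how many layers were created earlier in the process):

```python
import paddle.fluid as fluid
from paddle.fluid.imperative.nn import FC

with fluid.imperative.guard():
    fc_a = FC("my_model", size=4)  # full name becomes something like "my_model/FC_0"
    fc_b = FC("my_model", size=4)  # a sibling instance gets a different suffix, e.g. "my_model/FC_1"
    print(fc_a.full_name(), fc_b.full_name())
```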
@@ -27,6 +27,7 @@ __all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding']
 class Conv2D(layers.Layer):
     def __init__(self,
+                 name_scope,
                  num_channels,
                  num_filters,
                  filter_size,
@@ -38,19 +39,17 @@ class Conv2D(layers.Layer):
                  act=None,
                  param_attr=None,
                  bias_attr=None,
-                 name=None,
                  dtype=core.VarDesc.VarType.FP32):
         assert param_attr is not False, "param_attr should not be False here."
-        super(Conv2D, self).__init__(name=name, dtype=dtype)
+        super(Conv2D, self).__init__(name_scope, dtype=dtype)

         # TODO(minqiyang): Move this to the top.
         from ..layer_helper import LayerHelper
         self._helper = LayerHelper(
-            type(self).__name__,
+            self.full_name(),
             param_attr=param_attr,
             bias_attr=bias_attr,
             dtype=dtype,
-            name=name,
             act=act)

         self._groups = groups
@@ -143,6 +142,7 @@ class Conv2D(layers.Layer):
 class Pool2D(layers.Layer):
     def __init__(self,
+                 name_scope,
                  pool_size=-1,
                  pool_type="max",
                  pool_stride=1,
@@ -151,7 +151,6 @@ class Pool2D(layers.Layer):
                  use_cudnn=True,
                  ceil_mode=False,
                  exclusive=True,
-                 name=None,
                  dtype=core.VarDesc.VarType.FP32):
         if pool_type not in ["max", "avg"]:
             raise ValueError(
@@ -166,10 +165,10 @@ class Pool2D(layers.Layer):
         if not isinstance(use_cudnn, bool):
             raise ValueError("use_cudnn should be True or False")

-        super(Pool2D, self).__init__(name=name, dtype=dtype)
+        super(Pool2D, self).__init__(name_scope, dtype=dtype)

         from ..layer_helper import LayerHelper
-        self._helper = LayerHelper(type(self).__name__, dtype=dtype, name=name)
+        self._helper = LayerHelper(self.full_name(), dtype=dtype)

         self._pool_type = pool_type
         self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
@@ -205,25 +204,24 @@ class Pool2D(layers.Layer):
 class FC(layers.Layer):
     def __init__(self,
+                 name_scope,
                  size,
                  param_attr=None,
                  bias_attr=None,
                  num_flatten_dims=1,
                  dtype=core.VarDesc.VarType.FP32,
-                 act=None,
-                 name=None):
-        super(FC, self).__init__()
+                 act=None):
+        super(FC, self).__init__(name_scope)
         self._size = size
         self._num_flatten_dims = num_flatten_dims
         self._dtype = dtype
         from ..layer_helper import LayerHelper
         self._helper = LayerHelper(
-            'FC',
+            self.full_name(),
             param_attr=param_attr,
             bias_attr=bias_attr,
-            act=act,
-            name=name)
+            act=act)

     def _build_once(self, input):
         input_shape = input.shape
@@ -282,6 +280,7 @@ class FC(layers.Layer):
 class BatchNorm(layers.Layer):
     def __init__(self,
+                 name_scope,
                  num_channels,
                  act=None,
                  is_test=False,
@@ -292,22 +291,20 @@ class BatchNorm(layers.Layer):
                  dtype=core.VarDesc.VarType.FP32,
                  data_layout='NCHW',
                  in_place=False,
-                 name=None,
                  moving_mean_name=None,
                  moving_variance_name=None,
                  do_model_average_for_mean_and_var=False,
                  fuse_with_relu=False,
                  use_global_stats=False):
-        super(BatchNorm, self).__init__()
+        super(BatchNorm, self).__init__(name_scope)

         assert bias_attr is not False, "bias_attr should not be False in batch_norm."

         from ..layer_helper import LayerHelper
         self._helper = LayerHelper(
-            'batch_norm',
+            self.full_name(),
             param_attr=param_attr,
             bias_attr=bias_attr,
-            name=name,
             act=act)

         if dtype == core.VarDesc.VarType.FP16:
@@ -419,6 +416,7 @@ class Embedding(layers.Layer):
     constructor.

     Args:
+        name_scope: See base class.
         size(tuple|list): The shape of the look up table parameter. It should
             have two elements which indicate the size of the dictionary of
             embeddings and the size of each embedding vector respectively.
@@ -446,6 +444,7 @@ class Embedding(layers.Layer):
     """

     def __init__(self,
+                 name_scope,
                  size,
                  is_sparse=False,
                  is_distributed=False,
@@ -453,7 +452,7 @@ class Embedding(layers.Layer):
                  param_attr=None,
                  dtype='float32'):
-        super(Embedding, self).__init__()
+        super(Embedding, self).__init__(name_scope)
         self._size = size
         self._is_sparse = is_sparse
         self._is_distributed = is_distributed
@@ -468,7 +467,7 @@ class Embedding(layers.Layer):
         assert self._is_sparse is True and self._is_distributed is False

         from ..layer_helper import LayerHelper
-        self._helper = LayerHelper('embedding', param_attr=param_attr)
+        self._helper = LayerHelper(self.full_name(), param_attr=param_attr)
         self._w = self._helper.create_parameter(
             attr=self._param_attr,
             shape=self._size,
...
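Each layer in imperative/nn.py now takes name_scope as its first constructor argument and passes self.full_name() to its LayerHelper, so parameter names inherit the owning instance's prefix rather than a shared class-level one. A sketch of the updated constructor calls (argument values are illustrative, not taken from this commit):

```python
import paddle.fluid as fluid
from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC

with fluid.imperative.guard():
    conv = Conv2D("backbone", num_channels=3, num_filters=8, filter_size=3)
    pool = Pool2D("backbone", pool_size=2, pool_type="max")
    fc = FC("backbone", size=10)
    # Each instance gets its own prefix, e.g. "backbone/Conv2D_0", "backbone/FC_0".
    print(conv.full_name(), pool.full_name(), fc.full_name())
```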
@@ -34,6 +34,9 @@ class LayerHelper(object):
         self.kwargs = kwargs
         self.layer_type = layer_type
         name = self.kwargs.get('name', None)
+        # TODO(panyx0718, minqiyang): imperative mode
+        # can not use both `layer_type` and `name`. Deprecate LayerHelper
+        # and write a Helper for imperative mode.
         if name is None:
             self.kwargs['name'] = unique_name.generate(self.layer_type)
...
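LayerHelper still derives names from layer_type; what changes is that imperative layers now pass their per-instance full_name() as layer_type, so the generated parameter names carry that prefix. A rough sketch of the counter behaviour this relies on, assuming unique_name.generate appends a per-key counter as the updated test expectations suggest (actual values depend on what was generated earlier in the process):

```python
from paddle.fluid import unique_name

prefix = unique_name.generate("my_model/FC")  # e.g. "my_model/FC_0"
print(unique_name.generate(prefix))           # e.g. "my_model/FC_0_0"
print(unique_name.generate(prefix))           # e.g. "my_model/FC_0_1"
```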
@@ -20,10 +20,10 @@ from paddle.fluid.layer_helper import LayerHelper
 class L1(fluid.imperative.Layer):
-    def __init__(self):
-        super(L1, self).__init__()
+    def __init__(self, prefix):
+        super(L1, self).__init__(prefix)
         self._helper = LayerHelper(
-            'MyLayer',
+            self.full_name(),
             param_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.Constant(value=0.1)))
@@ -43,20 +43,20 @@ class L1(fluid.imperative.Layer):
 class L2(fluid.imperative.Layer):
-    def __init__(self):
-        super(L2, self).__init__()
-        self.layer1 = L1()
-        self.layer2 = L1()
+    def __init__(self, prefix):
+        super(L2, self).__init__(prefix)
+        self.layer1 = L1(self.full_name())
+        self.layer2 = L1(self.full_name())

     def forward(self):
         return self.layer1() + self.layer2()


 class L3(fluid.imperative.Layer):
-    def __init__(self):
-        super(L3, self).__init__()
-        self.layer1 = L2()
-        self.layer2 = L2()
+    def __init__(self, prefix):
+        super(L3, self).__init__(prefix)
+        self.layer1 = L2(self.full_name())
+        self.layer2 = L2(self.full_name())

     def forward(self):
         return self.layer1() + self.layer2()
@@ -65,16 +65,23 @@ class L3(fluid.imperative.Layer):
 class TestBaseLayer(unittest.TestCase):
     def test_one_level(self):
         with fluid.imperative.guard():
-            l = L1()
+            l = L1('test_one_level')
             ret = l()
-            self.assertEqual(l.w1.name, "MyLayer_0.w_0")
-            self.assertEqual(l.w2.name, "MyLayer_0.w_1")
+            self.assertEqual(l.w1.name, "test_one_level/L1_0_0.w_0")
+            self.assertEqual(l.w2.name, "test_one_level/L1_0_0.w_1")
             self.assertTrue(np.allclose(ret._numpy(), 0.2 * np.ones([2, 2])))

     def test_three_level(self):
         with fluid.imperative.guard():
-            l = L3()
+            l = L3('test_three_level')
+            names = [p.name for p in l.parameters()]
             ret = l()
+            self.assertEqual(names[0], "test_three_level/L3_0/L2_0/L1_0_0.w_0")
+            self.assertEqual(names[1], "test_three_level/L3_0/L2_0/L1_0_0.w_1")
+            self.assertEqual(names[2], "test_three_level/L3_0/L2_0/L1_1_0.w_0")
+            self.assertEqual(names[3], "test_three_level/L3_0/L2_0/L1_1_0.w_1")
+            self.assertEqual(names[4], "test_three_level/L3_0/L2_1/L1_0_0.w_0")
+            self.assertEqual(names[5], "test_three_level/L3_0/L2_1/L1_0_0.w_1")
             self.assertTrue(np.allclose(ret._numpy(), 0.8 * np.ones([2, 2])))
...
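The expected names in test_three_level come from composing full_name() prefixes level by level. A standalone sketch that mimics this composition with a local per-key counter (suffix values are illustrative; the real ones come from paddle.fluid.unique_name):

```python
import collections

_counters = collections.defaultdict(int)


def generate(key):
    # Mimic unique_name.generate: append a per-key, monotonically increasing suffix.
    n = _counters[key]
    _counters[key] += 1
    return "%s_%d" % (key, n)


l3 = generate("test_three_level/L3")  # "test_three_level/L3_0"
l2 = generate(l3 + "/L2")             # "test_three_level/L3_0/L2_0"
l1 = generate(l2 + "/L1")             # "test_three_level/L3_0/L2_0/L1_0"
w0 = generate(l1) + ".w_0"            # "test_three_level/L3_0/L2_0/L1_0_0.w_0"
print(w0)
```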
@@ -15,7 +15,6 @@
 import contextlib
 import unittest
 import numpy as np
-import sys

 import paddle.fluid as fluid
 from paddle.fluid import core
@@ -24,8 +23,8 @@ from test_imperative_base import new_program_scope
 class MyLayer(fluid.imperative.Layer):
-    def __init__(self):
-        super(MyLayer, self).__init__()
+    def __init__(self, name_scope):
+        super(MyLayer, self).__init__(name_scope)

     def forward(self, inputs):
         x = fluid.layers.relu(inputs)
@@ -50,12 +49,14 @@ class MyPyLayer(fluid.imperative.PyLayer):
 class MLP(fluid.imperative.Layer):
-    def __init__(self):
-        super(MLP, self).__init__()
-        self._fc1 = FC(3,
+    def __init__(self, name_scope):
+        super(MLP, self).__init__(name_scope)
+        self._fc1 = FC(self.full_name(),
+                       3,
                        fluid.ParamAttr(
                            initializer=fluid.initializer.Constant(value=0.1)))
-        self._fc2 = FC(4,
+        self._fc2 = FC(self.full_name(),
+                       4,
                        fluid.ParamAttr(
                            initializer=fluid.initializer.Constant(value=0.1)))
@@ -67,8 +68,9 @@ class MLP(fluid.imperative.Layer):
 class SimpleRNNCell(fluid.imperative.Layer):
-    def __init__(self, step_input_size, hidden_size, output_size, param_attr):
-        super(SimpleRNNCell, self).__init__()
+    def __init__(self, name_scope, step_input_size, hidden_size, output_size,
+                 param_attr):
+        super(SimpleRNNCell, self).__init__(name_scope)
         self.step_input_size = step_input_size
         self.hidden_size = hidden_size
         self.output_size = output_size
@@ -158,10 +160,11 @@ class SimpleRNNCell(fluid.imperative.Layer):
 class SimpleRNN(fluid.imperative.Layer):
-    def __init__(self):
-        super(SimpleRNN, self).__init__()
+    def __init__(self, name_scope):
+        super(SimpleRNN, self).__init__(name_scope)
         self.seq_len = 4
         self._cell = SimpleRNNCell(
+            self.full_name(),
             3,
             3,
             3,
@@ -205,7 +208,7 @@ class TestImperative(unittest.TestCase):
         with fluid.imperative.guard():
             cl = core.Layer()
             cl.forward([])
-            l = fluid.imperative.Layer()
+            l = fluid.imperative.Layer("l")
             self.assertRaises(NotImplementedError, l.forward, [])

     def test_pylayer_func_id(self):
@@ -281,7 +284,7 @@
         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
         with fluid.imperative.guard():
             var_inp = fluid.imperative.base.to_variable(np_inp)
-            l = MyLayer()
+            l = MyLayer("my_layer")
             x = l(var_inp)[0]
             self.assertIsNotNone(x)
             dy_out = x._numpy()
@@ -291,7 +294,7 @@
         with new_program_scope():
             inp = fluid.layers.data(
                 name="inp", shape=[3], append_batch_size=False)
-            l = MyLayer()
+            l = MyLayer("my_layer")
             x = l(inp)[0]
             param_grads = fluid.backward.append_backward(
                 x, parameter_list=[l._x_for_debug.name])[0]
@@ -309,7 +312,7 @@
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
         with fluid.imperative.guard():
             var_inp = fluid.imperative.base.to_variable(np_inp)
-            mlp = MLP()
+            mlp = MLP("mlp")
             out = mlp(var_inp)
             dy_out = out._numpy()
             out._backward()
@@ -318,7 +321,7 @@
         with new_program_scope():
             inp = fluid.layers.data(
                 name="inp", shape=[2, 2], append_batch_size=False)
-            mlp = MLP()
+            mlp = MLP("mlp")
             out = mlp(inp)
             param_grads = fluid.backward.append_backward(
                 out, parameter_list=[mlp._fc1._w.name])[0]
@@ -334,10 +337,10 @@
         self.assertTrue(np.allclose(dy_grad, static_grad))

         params = mlp.parameters(True)
-        self.assertEqual("FC_0.w_0", params[0].name)
-        self.assertEqual("FC_0.b_0", params[1].name)
-        self.assertEqual("FC_1.w_0", params[2].name)
-        self.assertEqual("FC_1.b_0", params[3].name)
+        self.assertEqual("mlp/MLP_0/FC_0_0.w_0", params[0].name)
+        self.assertEqual("mlp/MLP_0/FC_0_0.b_0", params[1].name)
+        self.assertEqual("mlp/MLP_0/FC_1_0.w_0", params[2].name)
+        self.assertEqual("mlp/MLP_0/FC_1_0.b_0", params[3].name)
         self.assertEqual(len(params), 4)

         sublayers = mlp.sublayers(True)
@@ -353,7 +356,7 @@
         with fluid.imperative.guard():
             var_inp = fluid.imperative.base.to_variable(np_inp)
             var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
-            simple_rnn = SimpleRNN()
+            simple_rnn = SimpleRNN("simple_rnn")
             outs, pre_hiddens = simple_rnn.forward(var_inp)
             dy_out = outs[3]._numpy()
             outs[3]._backward()
@@ -364,7 +367,7 @@
         with new_program_scope():
             inp = fluid.layers.data(
                 name="inp", shape=[1, 4, 3], append_batch_size=False)
-            simple_rnn = SimpleRNN()
+            simple_rnn = SimpleRNN("simple_rnn")
             outs, pre_hiddens = simple_rnn(inp)
             param_grads = fluid.backward.append_backward(outs[3])
             exe = fluid.Executor(fluid.CPUPlace())
...
@@ -28,10 +28,10 @@ from paddle.fluid.imperative.base import to_variable
 class Discriminator(fluid.imperative.Layer):
-    def __init__(self):
-        super(Discriminator, self).__init__()
-        self._fc1 = FC(size=32, act='elu', name="d_fc1")
-        self._fc2 = FC(size=1, name="d_fc2")
+    def __init__(self, name_scope):
+        super(Discriminator, self).__init__(name_scope)
+        self._fc1 = FC(self.full_name(), size=32, act='elu')
+        self._fc2 = FC(self.full_name(), size=1)

     def forward(self, inputs):
         x = self._fc1(inputs)
@@ -39,11 +39,11 @@ class Discriminator(fluid.imperative.Layer):
 class Generator(fluid.imperative.Layer):
-    def __init__(self):
-        super(Generator, self).__init__()
-        self._fc1 = FC(size=64, act='elu', name="g_fc1")
-        self._fc2 = FC(size=64, act='elu', name="g_fc2")
-        self._fc3 = FC(size=1, name="g_fc3")
+    def __init__(self, name_scope):
+        super(Generator, self).__init__(name_scope)
+        self._fc1 = FC(self.full_name(), size=64, act='elu')
+        self._fc2 = FC(self.full_name(), size=64, act='elu')
+        self._fc3 = FC(self.full_name(), size=1)

     def forward(self, inputs):
         x = self._fc1(inputs)
@@ -65,8 +65,8 @@ class TestImperativeMnist(unittest.TestCase):
         scope = fluid.core.Scope()
         with new_program_scope(
                 main=discriminate_p, startup=startup, scope=scope):
-            discriminator = Discriminator()
-            generator = Generator()
+            discriminator = Discriminator("d")
+            generator = Generator("g")

             img = fluid.layers.data(
                 name="img", shape=[2, 1], append_batch_size=False)
@@ -93,8 +93,8 @@
             sgd.minimize(d_loss)

         with new_program_scope(main=generate_p, startup=startup, scope=scope):
-            discriminator = Discriminator()
-            generator = Generator()
+            discriminator = Discriminator("d")
+            generator = Generator("g")

             noise = fluid.layers.data(
                 name="noise", shape=[2, 2], append_batch_size=False)
@@ -134,8 +134,8 @@
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed

-            discriminator = Discriminator()
-            generator = Generator()
+            discriminator = Discriminator("d")
+            generator = Generator("g")
             sgd = SGDOptimizer(learning_rate=1e-3)

             d_real = discriminator(to_variable(np.ones([2, 1], np.float32)))
...
@@ -28,6 +28,7 @@ from test_imperative_base import new_program_scope
 class SimpleImgConvPool(fluid.imperative.Layer):
     def __init__(self,
+                 name_scope,
                  num_channels,
                  num_filters,
                  filter_size,
@@ -44,9 +45,10 @@ class SimpleImgConvPool(fluid.imperative.Layer):
                  use_cudnn=False,
                  param_attr=None,
                  bias_attr=None):
-        super(SimpleImgConvPool, self).__init__()
+        super(SimpleImgConvPool, self).__init__(name_scope)

         self._conv2d = Conv2D(
+            self.full_name(),
             num_channels=num_channels,
             num_filters=num_filters,
             filter_size=filter_size,
@@ -59,6 +61,7 @@ class SimpleImgConvPool(fluid.imperative.Layer):
             use_cudnn=use_cudnn)

         self._pool2d = Pool2D(
+            self.full_name(),
             pool_size=pool_size,
             pool_type=pool_type,
             pool_stride=pool_stride,
@@ -73,19 +76,20 @@ class SimpleImgConvPool(fluid.imperative.Layer):
 class MNIST(fluid.imperative.Layer):
-    def __init__(self, param_attr=None, bias_attr=None):
-        super(MNIST, self).__init__()
+    def __init__(self, name_scope, param_attr=None, bias_attr=None):
+        super(MNIST, self).__init__(name_scope)

         self._simple_img_conv_pool_1 = SimpleImgConvPool(
-            1, 20, 5, 2, 2, act="relu")
+            self.full_name(), 1, 20, 5, 2, 2, act="relu")

         self._simple_img_conv_pool_2 = SimpleImgConvPool(
-            20, 50, 5, 2, 2, act="relu")
+            self.full_name(), 20, 50, 5, 2, 2, act="relu")

         pool_2_shape = 50 * 4 * 4
         SIZE = 10
         scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
-        self._fc = FC(10,
+        self._fc = FC(self.full_name(),
+                      10,
                       param_attr=fluid.param_attr.ParamAttr(
                           initializer=fluid.initializer.NormalInitializer(
                               loc=0.0, scale=scale)),
@@ -106,7 +110,7 @@ class TestImperativeMnist(unittest.TestCase):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed

-            mnist = MNIST()
+            mnist = MNIST("mnist")
             sgd = SGDOptimizer(learning_rate=1e-3)
             train_reader = paddle.batch(
                 paddle.dataset.mnist.train(), batch_size=128)
@@ -150,7 +154,7 @@ class TestImperativeMnist(unittest.TestCase):
             exe = fluid.Executor(fluid.CPUPlace(
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))

-            mnist = MNIST()
+            mnist = MNIST("mnist")
             sgd = SGDOptimizer(learning_rate=1e-3)
             train_reader = paddle.batch(
                 paddle.dataset.mnist.train(), batch_size=128)
...
@@ -28,12 +28,13 @@ from paddle.fluid.backward import append_backward
 class SimpleLSTMRNN(fluid.imperative.Layer):
     def __init__(self,
+                 name_scope,
                  hidden_size,
                  num_steps,
                  num_layers=2,
                  init_scale=0.1,
                  dropout=None):
-        super(SimpleLSTMRNN, self).__init__()
+        super(SimpleLSTMRNN, self).__init__(name_scope)
         self._hidden_size = hidden_size
         self._num_layers = num_layers
         self._init_scale = init_scale
@@ -130,13 +131,14 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
 class PtbModel(fluid.imperative.Layer):
     def __init__(self,
+                 name_scope,
                  hidden_size,
                  vocab_size,
                  num_layers=2,
                  num_steps=20,
                  init_scale=0.1,
                  dropout=None):
-        super(PtbModel, self).__init__()
+        super(PtbModel, self).__init__(name_scope)
         self.hidden_size = hidden_size
         self.vocab_size = vocab_size
         self.init_scale = init_scale
@@ -146,12 +148,14 @@ class PtbModel(fluid.imperative.Layer):
         from paddle.fluid.layer_helper import LayerHelper
         self._helper = LayerHelper('PtbModel', act="tanh")
         self.simple_lstm_rnn = SimpleLSTMRNN(
+            self.full_name(),
             hidden_size,
             num_steps,
             num_layers=num_layers,
             init_scale=init_scale,
             dropout=dropout)
         self.embedding = Embedding(
+            self.full_name(),
             size=[vocab_size, hidden_size],
             dtype='float32',
             is_sparse=False,
@@ -226,6 +230,7 @@ class TestImperativePtbRnn(unittest.TestCase):
             fluid.default_main_program().random_seed = seed
             # TODO: marsyang1993 Change seed to
             ptb_model = PtbModel(
+                "ptb_model",
                 hidden_size=hidden_size,
                 vocab_size=vocab_size,
                 num_layers=num_layers,
@@ -265,6 +270,7 @@ class TestImperativePtbRnn(unittest.TestCase):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
             ptb_model = PtbModel(
+                "ptb_model",
                 hidden_size=hidden_size,
                 vocab_size=vocab_size,
                 num_layers=num_layers,
...
@@ -70,15 +70,17 @@ def optimizer_setting(params):
 class ConvBNLayer(fluid.imperative.Layer):
     def __init__(self,
+                 name_scope,
                  num_channels,
                  num_filters,
                  filter_size,
                  stride=1,
                  groups=1,
                  act=None):
-        super(ConvBNLayer, self).__init__()
+        super(ConvBNLayer, self).__init__(name_scope)

         self._conv = Conv2D(
+            self.full_name(),
             num_channels=num_channels,
             num_filters=num_filters,
             filter_size=filter_size,
@@ -88,7 +90,7 @@ class ConvBNLayer(fluid.imperative.Layer):
             act=None,
             bias_attr=None)

-        self._batch_norm = BatchNorm(num_filters, act=act)
+        self._batch_norm = BatchNorm(self.full_name(), num_filters, act=act)

     def forward(self, inputs):
         y = self._conv(inputs)
@@ -98,21 +100,29 @@ class ConvBNLayer(fluid.imperative.Layer):
 class BottleneckBlock(fluid.imperative.Layer):
-    def __init__(self, num_channels, num_filters, stride, shortcut=True):
-        super(BottleneckBlock, self).__init__()
+    def __init__(self,
+                 name_scope,
+                 num_channels,
+                 num_filters,
+                 stride,
+                 shortcut=True):
+        super(BottleneckBlock, self).__init__(name_scope)

         self.conv0 = ConvBNLayer(
+            self.full_name(),
             num_channels=num_channels,
             num_filters=num_filters,
             filter_size=1,
             act='relu')
         self.conv1 = ConvBNLayer(
+            self.full_name(),
             num_channels=num_filters,
             num_filters=num_filters,
             filter_size=3,
             stride=stride,
             act='relu')
         self.conv2 = ConvBNLayer(
+            self.full_name(),
             num_channels=num_filters,
             num_filters=num_filters * 4,
             filter_size=1,
@@ -120,6 +130,7 @@ class BottleneckBlock(fluid.imperative.Layer):
         if not shortcut:
             self.short = ConvBNLayer(
+                self.full_name(),
                 num_channels=num_channels,
                 num_filters=num_filters * 4,
                 filter_size=1,
@@ -141,13 +152,13 @@ class BottleneckBlock(fluid.imperative.Layer):
         y = fluid.layers.elementwise_add(x=short, y=conv2)

-        layer_helper = LayerHelper('elementwise_add_activation', act='relu')
+        layer_helper = LayerHelper(self.full_name(), act='relu')
         return layer_helper.append_activation(y)
 class ResNet(fluid.imperative.Layer):
-    def __init__(self, layers=50, class_dim=102):
-        super(ResNet, self).__init__()
+    def __init__(self, name_scope, layers=50, class_dim=102):
+        super(ResNet, self).__init__(name_scope)
         self.layers = layers
         supported_layers = [50, 101, 152]
@@ -163,9 +174,18 @@ class ResNet(fluid.imperative.Layer):
             num_filters = [64, 128, 256, 512]

         self.conv = ConvBNLayer(
-            num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
+            self.full_name(),
+            num_channels=3,
+            num_filters=64,
+            filter_size=7,
+            stride=2,
+            act='relu')
         self.pool2d_max = Pool2D(
-            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
+            self.full_name(),
+            pool_size=3,
+            pool_stride=2,
+            pool_padding=1,
+            pool_type='max')

         self.bottleneck_block_list = []
         num_channels = 64
@@ -175,6 +195,7 @@ class ResNet(fluid.imperative.Layer):
                 bottleneck_block = self.add_sublayer(
                     'bb_%d_%d' % (block, i),
                     BottleneckBlock(
+                        self.full_name(),
                         num_channels=num_channels,
                         num_filters=num_filters[block],
                         stride=2 if i == 0 and block != 0 else 1,
@@ -184,12 +205,13 @@ class ResNet(fluid.imperative.Layer):
                 shortcut = True

         self.pool2d_avg = Pool2D(
-            pool_size=7, pool_type='avg', global_pooling=True)
+            self.full_name(), pool_size=7, pool_type='avg', global_pooling=True)

         import math
         stdv = 1.0 / math.sqrt(2048 * 1.0)

-        self.out = FC(size=class_dim,
+        self.out = FC(self.full_name(),
+                      size=class_dim,
                       act='softmax',
                       param_attr=fluid.param_attr.ParamAttr(
                           initializer=fluid.initializer.Uniform(-stdv, stdv)))
@@ -214,7 +236,7 @@ class TestImperativeResnet(unittest.TestCase):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed

-            resnet = ResNet()
+            resnet = ResNet("resnet")
             optimizer = optimizer_setting(train_parameters)
             np.random.seed(seed)
             import random
@@ -275,7 +297,7 @@ class TestImperativeResnet(unittest.TestCase):
             exe = fluid.Executor(fluid.CPUPlace(
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))

-            resnet = ResNet()
+            resnet = ResNet("resnet")
             optimizer = optimizer_setting(train_parameters)
             np.random.seed(seed)
...