Commit 7177c276 authored by chengduoZH

reorder parameters of layer functions

Parent 51e7c26f
@@ -17,13 +17,13 @@ __all__ = [
 def fc(input,
        size,
+       num_flatten_dims=1,
        param_attr=None,
        param_initializer=None,
        bias_attr=None,
        bias_initializer=None,
-       name=None,
        act=None,
-       num_flatten_dims=1,
+       name=None,
        main_program=None,
        startup_program=None):
     """
@@ -32,15 +32,15 @@ def fc(input,
     Args:
         input: The input tensor to the function
         size: The size of the layer
+        num_flatten_dims: Number of columns in input
         param_attr: The parameters/weights to the FC Layer
         param_initializer: Initializer used for the weight/parameter.
                            If None, XavierInitializer() is used
         bias_attr: The bias parameter for the FC layer
         bias_initializer: Initializer used for the bias.
                           If None, then ConstantInitializer() is used
-        name: Name/alias of the function
         act: Activation to be applied to the output of FC layer
-        num_flatten_dims: Number of columns in input
+        name: Name/alias of the function
         main_program: Name of the main program that calls this
         startup_program: Name of the startup program
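The new ordering follows one convention across layers: required inputs first, then structural options, then the param/bias attrs and initializers, then act and name, with main_program and startup_program always last. Below is a minimal usage sketch of fc under this ordering; the paddle.v2.fluid.layers import path and the tensor names are assumptions for illustration, not part of this commit:

    import paddle.v2.fluid.layers as layers

    # A hypothetical 784-feature input; keyword arguments keep the call
    # correct regardless of how positional parameters are reordered.
    image = layers.data(name='pixel', shape=[784], data_type='float32')
    hidden = layers.fc(input=image, size=128, act='relu', name='fc_1')

Calling with keywords, as above, is the safe pattern while signatures are still being shuffled.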
@@ -111,9 +111,9 @@ def fc(input,
 def embedding(input,
               size,
+              data_type='float32',
               is_sparse=False,
               param_attr=None,
-              data_type='float32',
               main_program=None,
               startup_program=None):
     """
@@ -122,9 +122,9 @@ def embedding(input,
     Args:
         input: The input to the function
         size: The size of the layer
+        data_type: The type of data : float32, float_16, int etc
         is_sparse: A flag that declares whether the input is sparse
         param_attr: Parameters for this layer
-        data_type: The type of data : float32, float_16, int etc
         main_program: Name of the main program that calls this
         startup_program: Name of the startup program
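A sketch of embedding with data_type now grouped next to size rather than trailing param_attr; it continues the snippet above (same layers import), and the [vocab_size, embedding_dim] reading of size is an assumption from common fluid usage:

    # Hypothetical vocabulary of 10000 ids embedded into 32 dimensions.
    word = layers.data(name='word_id', shape=[1], data_type='int64')
    emb = layers.embedding(input=word,
                           size=[10000, 32],
                           data_type='float32',
                           is_sparse=True)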
@@ -152,7 +152,6 @@ def embedding(input,
 # TODO(qijun): expose H0 and C0
 def dynamic_lstm(input,
                  size,
-                 data_type='float32',
                  param_attr=None,
                  bias_attr=None,
                  use_peepholes=True,
@@ -160,6 +159,7 @@ def dynamic_lstm(input,
                  gate_activation='sigmoid',
                  cell_activation='tanh',
                  candidate_activation='tanh',
+                 data_type='float32',
                  main_program=None,
                  startup_program=None):
     helper = LayerHelper('lstm', **locals())
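dynamic_lstm gets the same treatment: data_type moves down next to main_program so the activation options stay grouped. A sketch continuing from the embedding above; that the input must already be projected to 4 * hidden_size, and that the op returns hidden and cell sequences, are assumptions based on later fluid versions rather than anything visible in this diff:

    hidden_size = 32
    # Project to 4 * hidden_size, one slice per LSTM gate.
    proj = layers.fc(input=emb, size=hidden_size * 4)
    lstm_h, lstm_c = layers.dynamic_lstm(input=proj,
                                         size=hidden_size * 4,
                                         data_type='float32')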
@@ -200,9 +200,9 @@ def dynamic_lstm(input,
 def data(name,
          shape,
+         append_batch_size=True,
          data_type='float32',
          type=core.VarDesc.VarType.LOD_TENSOR,
-         append_batch_size=True,
          main_program=None,
          startup_program=None,
          stop_gradient=True):
@@ -212,9 +212,9 @@ def data(name,
     Args:
         name: The name/alias of the function
         shape: Tuple declaring the shape.
+        append_batch_size: Whether or not to append the data as a batch.
         data_type: The type of data : float32, float_16, int etc
         type: The output type. By default it is LOD_TENSOR.
-        append_batch_size: Whether or not to append the data as a batch.
         main_program: Name of the main program that calls this
         startup_program: Name of the startup program
         stop_gradient: A boolean that mentions whether gradient should flow.
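With append_batch_size promoted to sit right after shape, a typical data declaration reads top-down: name, shape, then whether a batch dimension is prepended. Per the docstring above, append_batch_size=True turns a declared [3, 32, 32] into a batched [-1, 3, 32, 32] at runtime (the -1 placeholder is an inference, not shown in this diff):

    img = layers.data(name='img',
                      shape=[3, 32, 32],
                      append_batch_size=True,
                      data_type='float32')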
@@ -600,12 +600,12 @@ def sequence_conv(input,
                   num_filters,
                   filter_size=3,
                   filter_stride=1,
-                  act=None,
                   padding=None,
                   bias_attr=None,
                   bias_initializer=None,
                   param_attr=None,
                   param_initializer=None,
+                  act=None,
                   main_program=None,
                   startup_program=None):
     """
@@ -658,16 +658,16 @@ def sequence_conv(input,
 def conv2d(input,
            num_filters,
-           name=None,
-           filter_size=[1, 1],
-           act=None,
-           groups=None,
+           filter_size,
            stride=[1, 1],
            padding=None,
-           bias_attr=None,
-           bias_initializer=None,
+           groups=None,
            param_attr=None,
            param_initializer=None,
+           bias_attr=None,
+           bias_initializer=None,
+           act=None,
+           name=None,
            main_program=None,
            startup_program=None):
     """
...
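conv2d is the largest reorder: name no longer comes third, filter_size loses its [1, 1] default and becomes required, and the attr/initializer/act/name tail matches fc exactly. A sketch under the new signature, with illustrative filter and stride values and the img input from the data snippet above:

    conv = layers.conv2d(input=img,
                         num_filters=16,
                         filter_size=[3, 3],
                         stride=[1, 1],
                         act='relu')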