Unverified commit df9c13a8, authored by Cao Ying and committed by GitHub

Merge pull request #7485 from lcy-seso/wrapper_for_elementwise_math_op

Add wrappers for element-wise math operators.
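For context (not part of the diff itself): the wrapped operators become ordinary layer functions under `paddle.v2.fluid.layers`. Below is a minimal sketch, assuming the fluid API of this release; the variable name `x` and its shape are made up for illustration.

```python
import paddle.v2.fluid as fluid

# Two of the newly wrapped element-wise ops, used like any other layer.
x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.exp(x=x)        # element-wise exponential of x
z = fluid.layers.softsign(x=y)   # element-wise softsign of y

# Each wrapper appends the corresponding operator to the default main
# program and returns its output variable.
print(fluid.default_main_program())
```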
@@ -358,3 +358,132 @@ reduce_min
.. autofunction:: paddle.v2.fluid.layers.reduce_min
:noindex:
logsigmoid
----------
.. autofunction:: paddle.v2.fluid.layers.logsigmoid
:noindex:
exp
---
.. autofunction:: paddle.v2.fluid.layers.exp
:noindex:
relu
----
.. autofunction:: paddle.v2.fluid.layers.relu
:noindex:
tanh
----
.. autofunction:: paddle.v2.fluid.layers.tanh
:noindex:
tanh_shrink
-----------
.. autofunction:: paddle.v2.fluid.layers.tanh_shrink
:noindex:
softshrink
----------
.. autofunction:: paddle.v2.fluid.layers.softshrink
:noindex:
sqrt
----
.. autofunction:: paddle.v2.fluid.layers.sqrt
:noindex:
abs
----
.. autofunction:: paddle.v2.fluid.layers.abs
:noindex:
ceil
----
.. autofunction:: paddle.v2.fluid.layers.ceil
:noindex:
floor
-----
.. autofunction:: paddle.v2.fluid.layers.floor
:noindex:
round
-----
.. autofunction:: paddle.v2.fluid.layers.round
:noindex:
reciprocal
----------
.. autofunction:: paddle.v2.fluid.layers.reciprocal
:noindex:
log
---
.. autofunction:: paddle.v2.fluid.layers.log
:noindex:
square
------
.. autofunction:: paddle.v2.fluid.layers.square
:noindex:
softplus
--------
.. autofunction:: paddle.v2.fluid.layers.softplus
:noindex:
softsign
---------
.. autofunction:: paddle.v2.fluid.layers.softsign
:noindex:
brelu
-----
.. autofunction:: paddle.v2.fluid.layers.brelu
:noindex:
leaky_relu
----------
.. autofunction:: paddle.v2.fluid.layers.leaky_relu
:noindex:
soft_relu
---------
.. autofunction:: paddle.v2.fluid.layers.soft_relu
:noindex:
elu
----
.. autofunction:: paddle.v2.fluid.layers.elu
:noindex:
relu6
-----
.. autofunction:: paddle.v2.fluid.layers.relu6
:noindex:
pow
----
.. autofunction:: paddle.v2.fluid.layers.pow
:noindex:
hard_shrink
-----------
.. autofunction:: paddle.v2.fluid.layers.hard_shrink
:noindex:
thresholded_relu
----------------
.. autofunction:: paddle.v2.fluid.layers.thresholded_relu
:noindex:
hard_sigmoid
-------------
.. autofunction:: paddle.v2.fluid.layers.hard_sigmoid
:noindex:
swish
------
.. autofunction:: paddle.v2.fluid.layers.swish
:noindex:
@@ -23,9 +23,22 @@ from memory_optimization_transpiler import memory_optimize
Tensor = LoDTensor
__all__ = framework.__all__ + executor.__all__ + [
'io',
'initializer',
'layers',
'nets',
'optimizer',
'backward',
'regularizer',
'LoDTensor',
'CPUPlace',
'CUDAPlace',
'Tensor',
'ParamAttr',
'DataFeeder',
'clip',
'DistributeTranspiler',
'memory_optimize',
]
......
@@ -3,7 +3,10 @@ from . import core
import collections
import copy
__all__ = [
'append_backward',
'calc_gradient',
]
def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
......
@@ -3,7 +3,9 @@ import layers
from . import core
__all__ = [
'GradientClipByValue',
'append_gradient_clip_ops',
'error_clip_callback',
]
......
""" """
Default scope function. Default scope function.
`Paddle` manages Scope as programming language's scope. It just a `Paddle` manages Scope as programming language's scope. It just a
thread-local stack of Scope. Top of that stack is current scope, the bottom thread-local stack of Scope. Top of that stack is current scope, the bottom
of that stack is all scopes' parent. of that stack is all scopes' parent.
Invoking `var/find_var` can `new/find` variable in current scope. Invoking `var/find_var` can `new/find` variable in current scope.
Invoking `enter_local_scope/leave_local_scope` can create or destroy local Invoking `enter_local_scope/leave_local_scope` can create or destroy local
scope. scope.
A `scoped_function` will take a `function` as input. That function will be A `scoped_function` will take a `function` as input. That function will be
invoked in a new local scope. invoked in a new local scope.
""" """
import paddle.v2.fluid.core import paddle.v2.fluid.core
...@@ -19,8 +19,12 @@ import threading ...@@ -19,8 +19,12 @@ import threading
__tl_scope__ = threading.local() __tl_scope__ = threading.local()
__all__ = [ __all__ = [
'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'var', 'get_cur_scope',
'find_var', 'scoped_function' 'enter_local_scope',
'leave_local_scope',
'var',
'find_var',
'scoped_function',
]
@@ -71,7 +75,7 @@ def find_var(name):
def scoped_function(func):
"""
invoke `func` in a new scope.
:param func: a callable function that will be run in a new scope.
:type func: callable
"""
......
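As a side note on the scope helpers documented above, here is a minimal sketch (not part of this diff) of running a function inside a fresh local scope via `scoped_function`; the variable name `"tmp"` is made up for illustration.

```python
from paddle.v2.fluid.default_scope_funcs import (
    find_var, get_cur_scope, scoped_function, var)

def create_tmp():
    # `var` creates the variable in the current (here: local) scope,
    # `find_var` searches the current scope and its parents.
    var("tmp")
    assert find_var("tmp") is not None

# `scoped_function` enters a new local scope, invokes the function, and
# leaves the scope again, so "tmp" does not leak into the outer scope.
scoped_function(create_tmp)
assert find_var("tmp") is None
```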
@@ -4,7 +4,10 @@ import layers
from framework import Program, unique_name, Variable, program_guard
from layer_helper import LayerHelper
__all__ = [
'Accuracy',
'ChunkEvaluator',
]
def _clone_var_(block, var):
@@ -21,19 +24,19 @@ def _clone_var_(block, var):
class Evaluator(object):
"""
Base class for all evaluators.
Args:
name(str): The name of the evaluator, such as "accuracy". Used to generate a
temporary variable name.
main_program(Program, optional): The evaluator should be added to this
main_program. Default default_main_program()
startup_program(Program, optional): The parameter should be added to this
startup_program. Default default_startup_program()
Attributes:
states(list): The list of state variables. They will be reset to zero
when `reset` is invoked.
metrics(list): The list of metrics variables. They will be calculated
every mini-batch.
"""
@@ -66,14 +69,14 @@ class Evaluator(object):
def create_state(self, suffix, dtype, shape):
"""
Create a state variable.
NOTE: It is not a public API.
Args:
suffix(str): the state suffix.
dtype(str|core.DataType): the state data type.
shape(tuple|list): the shape of the state.
Returns: State variable
@@ -127,8 +130,8 @@ class Accuracy(Evaluator):
class ChunkEvaluator(Evaluator):
"""
Accumulate counter numbers output by chunk_eval from mini-batches and
compute the precision, recall, and F1-score using the accumulated counter
numbers.
"""
......
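To make the `Evaluator` docstrings above concrete, here is a rough usage sketch for the `Accuracy` subclass. It is not part of the diff; it assumes the fluid API of this release (e.g. that `Executor.run` accepts numpy arrays in `feed`), and the tiny network and random data are purely illustrative.

```python
import numpy as np
import paddle.v2.fluid as fluid

# A toy classifier, just enough to attach an Accuracy evaluator to.
image = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
predict = fluid.layers.fc(input=image, size=10, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)

accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

accuracy.reset(exe)             # zero the accumulated state variables
for _ in range(5):              # a few random "mini-batches"
    x = np.random.rand(32, 784).astype('float32')
    y = np.random.randint(0, 10, size=(32, 1)).astype('int64')
    outs = exe.run(fluid.default_main_program(),
                   feed={'image': x, 'label': y},
                   fetch_list=[avg_cost] + accuracy.metrics)
pass_acc = accuracy.eval(exe)   # metric accumulated over all mini-batches
print(pass_acc)
```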
@@ -7,9 +7,15 @@ import proto.framework_pb2 as framework_pb2
from . import core
__all__ = [
'Block',
'Variable',
'Program',
'Operator',
'default_startup_program',
'default_main_program',
'program_guard',
'switch_startup_program',
'switch_main_program',
]
EMPTY_VAR_NAME = core.kEmptyVarName()
......
import framework
import numpy as np
__all__ = [
'Constant',
'Uniform',
'Normal',
'Xavier',
]
class Initializer(object):
......
@@ -4,9 +4,15 @@ import cPickle as pickle
from paddle.v2.fluid.framework import Program, Parameter, default_main_program, Variable
__all__ = [
'save_vars',
'save_params',
'save_persistables',
'load_vars',
'load_params',
'load_persistables',
'save_inference_model',
'load_inference_model',
'get_inference_program',
]
......
@@ -9,12 +9,33 @@ from ..param_attr import ParamAttr
from tensor import concat
__all__ = [
'fc',
'embedding',
'dynamic_lstm',
'gru_unit',
'linear_chain_crf',
'crf_decoding',
'cos_sim',
'cross_entropy',
'square_error_cost',
'accuracy',
'chunk_eval',
'sequence_conv',
'conv2d',
'sequence_pool',
'pool2d',
'batch_norm',
'beam_search_decode',
'conv2d_transpose',
'sequence_expand',
'lstm_unit',
'reduce_sum',
'reduce_mean',
'reduce_max',
'reduce_min',
'sequence_first_step',
'sequence_last_step',
'dropout',
]
@@ -248,13 +269,13 @@ def gru_unit(input,
h_t & = dot((1-u_t), m_t) + dot(u_t, h_{t-1})
The inputs of the gru unit include :math:`z_t` and :math:`h_{t-1}`. In terms
of the equation above, the :math:`z_t` is split into 3 parts -
:math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to
implement a full GRU unit operator for an input, a fully
connected layer has to be applied, such that :math:`z_t = W_{fc}x_t`.
The terms :math:`u_t` and :math:`r_t` represent the update and reset gates
of the GRU cell. Unlike LSTM, GRU has one fewer gate. However, there is
an intermediate candidate hidden output, which is denoted by :math:`m_t`.
This layer has three outputs: :math:`h_t`, :math:`dot(r_t, h_{t-1})`
and the concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`.
@@ -276,7 +297,7 @@ def gru_unit(input,
.. code-block:: python
# assuming we have x_t_data and prev_hidden of size=10
x_t = fluid.layers.fc(input=x_t_data, size=30)
hidden_val, r_h_val, gate_val = fluid.layers.gru_unit(input=x_t,
hidden = prev_hidden)
......
from ..registry import register_layer
__activations__ = [
'sigmoid',
'logsigmoid',
'exp',
'relu',
'tanh',
'tanh_shrink',
'softshrink',
'sqrt',
'abs',
'ceil',
'floor',
'round',
'reciprocal',
'log',
'square',
'softplus',
'softsign',
'brelu',
'leaky_relu',
'soft_relu',
'elu',
'relu6',
'pow',
'stanh',
'hard_shrink',
'thresholded_relu',
'hard_sigmoid',
'swish',
]
__all__ = [
......
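For readers unfamiliar with how the names in `__activations__` turn into callable layers: `ops.py` feeds each name through `register_layer` (diffed further below) and binds the generated function into the module namespace, so no hand-written wrapper is needed. Below is a self-contained toy sketch of that mechanism, with a stub standing in for the real `register_layer`.

```python
def register_layer(op_type):
    # Stub standing in for fluid's registry.register_layer: it returns a
    # function that would append an `op_type` operator to the program.
    def layer_fn(x):
        print("append '%s' op for input %r" % (op_type, x))
        return x
    layer_fn.__name__ = op_type
    return layer_fn

__activations__ = ['sigmoid', 'exp', 'relu', 'sqrt']   # abbreviated list

# Bind one auto-generated layer function per operator name; fluid's ops.py
# is assumed to do the same with the real register_layer.
for _op in __activations__:
    globals()[_op] = register_layer(_op)

exp(3.0)   # prints: append 'exp' op for input 3.0
```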
@@ -6,8 +6,16 @@ from ..core import DataType
import numpy
__all__ = [
'create_tensor',
'create_parameter',
'cast',
'concat',
'sums',
'assign',
'fill_constant_batch_size_like',
'fill_constant',
'ones',
'zeros',
]
......
@@ -121,8 +121,10 @@ class ControlFlowGraph(object):
# and dtype_to_size[cache_dtype]
if x_dtype == cache_dtype:
print(
("Hit Cache !!!! cache pool index "
"is %d, var name is %s, "
"cached var name is %s, "
"var shape is %s ") %
(index, x, cache_var, str(cache_shape)))
self.pool.pop(index)
_rename_arg_(
......
import layers
__all__ = [
"simple_img_conv_pool",
"sequence_conv_pool",
]
def simple_img_conv_pool(input,
......
@@ -8,7 +8,11 @@ import proto.framework_pb2 as framework_pb2
from framework import OpProtoHolder, Variable, Program, Operator
from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
__all__ = [
'deprecated',
'register_layer',
'autodoc',
]
def _convert_(name):
@@ -80,11 +84,10 @@ def _generate_doc_string_(op_proto):
def register_layer(op_type):
"""Register the Python layer for an Operator.
Args:
op_type: The name of the operator to be created.
This function takes in the operator type (sigmoid, mean, average, etc.) and
creates the operator functionality.
@@ -98,16 +101,16 @@ def register_layer(op_type):
if len(not_intermediate_outputs) != 1:
raise ValueError("Only one non intermediate output operator can be",
"automatically generated.")
if not_intermediate_outputs[0].duplicable:
raise ValueError(
"Only non duplicable op can be automatically generated.")
for output in intermediate_outputs:
if output.duplicable:
raise ValueError("The op can be automatically generated only when ",
"all intermediate ops are not duplicable.")
o_name = not_intermediate_outputs[0].name
intermediate_output_names = [output.name for output in intermediate_outputs]
......
import framework
__all__ = [
'append_regularization_ops',
'L1Decay',
'L2Decay',
]
def append_regularization_ops(parameters_and_grads, regularization=None):
......