Commit 8ac744f3 authored by: Y ying

Add wrappers for elementwise math operators.

Parent 5dbd5370
@@ -358,3 +358,132 @@ reduce_min
 .. autofunction:: paddle.v2.fluid.layers.reduce_min
     :noindex:
+
+logsigmoid
+----------
+.. autofunction:: paddle.v2.fluid.layers.logsigmoid
+    :noindex:
+
+exp
+---
+.. autofunction:: paddle.v2.fluid.layers.exp
+    :noindex:
+
+relu
+----
+.. autofunction:: paddle.v2.fluid.layers.relu
+    :noindex:
+
+tanh
+----
+.. autofunction:: paddle.v2.fluid.layers.tanh
+    :noindex:
+
+tanh_shrink
+-----------
+.. autofunction:: paddle.v2.fluid.layers.tanh_shrink
+    :noindex:
+
+softshrink
+----------
+.. autofunction:: paddle.v2.fluid.layers.softshrink
+    :noindex:
+
+sqrt
+----
+.. autofunction:: paddle.v2.fluid.layers.sqrt
+    :noindex:
+
+abs
+---
+.. autofunction:: paddle.v2.fluid.layers.abs
+    :noindex:
+
+ceil
+----
+.. autofunction:: paddle.v2.fluid.layers.ceil
+    :noindex:
+
+floor
+-----
+.. autofunction:: paddle.v2.fluid.layers.floor
+    :noindex:
+
+round
+-----
+.. autofunction:: paddle.v2.fluid.layers.round
+    :noindex:
+
+reciprocal
+----------
+.. autofunction:: paddle.v2.fluid.layers.reciprocal
+    :noindex:
+
+log
+---
+.. autofunction:: paddle.v2.fluid.layers.log
+    :noindex:
+
+square
+------
+.. autofunction:: paddle.v2.fluid.layers.square
+    :noindex:
+
+softplus
+--------
+.. autofunction:: paddle.v2.fluid.layers.softplus
+    :noindex:
+
+softsign
+--------
+.. autofunction:: paddle.v2.fluid.layers.softsign
+    :noindex:
+
+brelu
+-----
+.. autofunction:: paddle.v2.fluid.layers.brelu
+    :noindex:
+
+leaky_relu
+----------
+.. autofunction:: paddle.v2.fluid.layers.leaky_relu
+    :noindex:
+
+soft_relu
+---------
+.. autofunction:: paddle.v2.fluid.layers.soft_relu
+    :noindex:
+
+elu
+---
+.. autofunction:: paddle.v2.fluid.layers.elu
+    :noindex:
+
+relu6
+-----
+.. autofunction:: paddle.v2.fluid.layers.relu6
+    :noindex:
+
+pow
+---
+.. autofunction:: paddle.v2.fluid.layers.pow
+    :noindex:
+
+hard_shrink
+-----------
+.. autofunction:: paddle.v2.fluid.layers.hard_shrink
+    :noindex:
+
+thresholded_relu
+----------------
+.. autofunction:: paddle.v2.fluid.layers.thresholded_relu
+    :noindex:
+
+hard_sigmoid
+------------
+.. autofunction:: paddle.v2.fluid.layers.hard_sigmoid
+    :noindex:
+
+swish
+-----
+.. autofunction:: paddle.v2.fluid.layers.swish
+    :noindex:
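For orientation, this is how the generated activation layers are typically wired into a fluid program. A minimal sketch, not part of the commit; it assumes the wrappers produced by `register_layer` (see `registry.py` below) take the operator's input under a lowercased keyword such as `x=...`:

```python
import paddle.v2.fluid as fluid

# Sketch: route a small network through two newly documented activations.
x = fluid.layers.data(name='x', shape=[16], dtype='float32')
h = fluid.layers.fc(input=x, size=16)
h = fluid.layers.relu(x=h)      # elementwise max(0, h)
y = fluid.layers.softsign(x=h)  # elementwise h / (1 + |h|)
```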
@@ -23,9 +23,22 @@ from memory_optimization_transpiler import memory_optimize
 Tensor = LoDTensor
 
 __all__ = framework.__all__ + executor.__all__ + [
-    'io', 'initializer', 'layers', 'nets', 'optimizer', 'backward',
-    'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', 'ParamAttr'
-    'DataFeeder', 'clip', 'DistributeTranspiler', 'memory_optimize'
+    'io',
+    'initializer',
+    'layers',
+    'nets',
+    'optimizer',
+    'backward',
+    'regularizer',
+    'LoDTensor',
+    'CPUPlace',
+    'CUDAPlace',
+    'Tensor',
+    'ParamAttr',  # comma added; 'ParamAttr' 'DataFeeder' silently concatenated to 'ParamAttrDataFeeder'
+    'DataFeeder',
+    'clip',
+    'DistributeTranspiler',
+    'memory_optimize',
 ]
...
@@ -3,7 +3,10 @@ from . import core
 import collections
 import copy
 
-__all__ = ['append_backward', 'calc_gradient']
+__all__ = [
+    'append_backward',
+    'calc_gradient',
+]
 
 def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
...
@@ -3,7 +3,9 @@ import layers
 from . import core
 
 __all__ = [
-    'GradientClipByValue', 'append_gradient_clip_ops', 'error_clip_callback'
+    'GradientClipByValue',
+    'append_gradient_clip_ops',
+    'error_clip_callback',
 ]
...
@@ -19,8 +19,12 @@ import threading
 __tl_scope__ = threading.local()
 
 __all__ = [
-    'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'var',
-    'find_var', 'scoped_function'
+    'get_cur_scope',
+    'enter_local_scope',
+    'leave_local_scope',
+    'var',
+    'find_var',
+    'scoped_function',
 ]
...
@@ -4,7 +4,10 @@ import layers
 from framework import Program, unique_name, Variable, program_guard
 from layer_helper import LayerHelper
 
-__all__ = ['Accuracy', 'ChunkEvaluator']
+__all__ = [
+    'Accuracy',
+    'ChunkEvaluator',
+]
 
 def _clone_var_(block, var):
...
@@ -7,9 +7,15 @@ import proto.framework_pb2 as framework_pb2
 from . import core
 
 __all__ = [
-    'Block', 'Variable', 'Program', 'Operator', 'default_startup_program',
-    'default_main_program', 'program_guard', 'switch_startup_program',
-    'switch_main_program'
+    'Block',
+    'Variable',
+    'Program',
+    'Operator',
+    'default_startup_program',
+    'default_main_program',
+    'program_guard',
+    'switch_startup_program',
+    'switch_main_program',
 ]
 
 EMPTY_VAR_NAME = core.kEmptyVarName()
...
 import framework
 import numpy as np
 
-__all__ = ['Constant', 'Uniform', 'Normal', 'Xavier']
+__all__ = [
+    'Constant',
+    'Uniform',
+    'Normal',
+    'Xavier',
+]
 
 class Initializer(object):
...
@@ -4,9 +4,15 @@ import cPickle as pickle
 from paddle.v2.fluid.framework import Program, Parameter, default_main_program, Variable
 
 __all__ = [
-    'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params',
-    'load_persistables', "save_inference_model", "load_inference_model",
-    "get_inference_program"
+    'save_vars',
+    'save_params',
+    'save_persistables',
+    'load_vars',
+    'load_params',
+    'load_persistables',
+    'save_inference_model',
+    'load_inference_model',
+    'get_inference_program',
 ]
...
@@ -9,12 +9,33 @@ from ..param_attr import ParamAttr
 from tensor import concat
 
 __all__ = [
-    'fc', 'embedding', 'dynamic_lstm', 'gru_unit', 'linear_chain_crf',
-    'crf_decoding', 'cos_sim', 'cross_entropy', 'square_error_cost', 'accuracy',
-    'chunk_eval', 'sequence_conv', 'conv2d', 'sequence_pool', 'pool2d',
-    'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'sequence_expand',
-    'lstm_unit', 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min',
-    'sequence_first_step', 'sequence_last_step', 'dropout'
+    'fc',
+    'embedding',
+    'dynamic_lstm',
+    'gru_unit',
+    'linear_chain_crf',
+    'crf_decoding',
+    'cos_sim',
+    'cross_entropy',
+    'square_error_cost',
+    'accuracy',
+    'chunk_eval',
+    'sequence_conv',
+    'conv2d',
+    'sequence_pool',
+    'pool2d',
+    'batch_norm',
+    'beam_search_decode',
+    'conv2d_transpose',
+    'sequence_expand',
+    'lstm_unit',
+    'reduce_sum',
+    'reduce_mean',
+    'reduce_max',
+    'reduce_min',
+    'sequence_first_step',
+    'sequence_last_step',
+    'dropout',
 ]
...
 from ..registry import register_layer
 
 __activations__ = [
-    'abs', 'tanh', 'sigmoid', 'relu', 'sqrt', 'ceil', 'floor', 'log', 'round'
+    'sigmoid',
+    'logsigmoid',
+    'exp',
+    'relu',
+    'tanh',
+    'tanh_shrink',
+    'softshrink',
+    'sqrt',
+    'abs',
+    'ceil',
+    'floor',
+    'round',
+    'reciprocal',
+    'log',
+    'square',
+    'softplus',
+    'softsign',
+    'brelu',
+    'leaky_relu',
+    'soft_relu',
+    'elu',
+    'relu6',
+    'pow',
+    'stanh',
+    'hard_shrink',
+    'thresholded_relu',
+    'hard_sigmoid',
+    'swish',
 ]
 
 __all__ = [
...
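The diff truncates `ops.py` at `__all__ = [`. Presumably, as elsewhere in fluid's layer modules, each listed operator name is then materialized as a module-level layer function via `register_layer`; a sketch of that idiom, under that assumption:

```python
# Assumed continuation (the diff is cut off above): every name in
# __activations__, plus the module's own __all__ entries, becomes a
# generated layer function injected into this module's namespace.
for _OP in set(__all__ + __activations__):
    globals()[_OP] = register_layer(_OP)
```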
@@ -2,8 +2,16 @@ from ..layer_helper import LayerHelper
 from ..param_attr import ParamAttr
 
 __all__ = [
-    'create_tensor', 'create_parameter', 'cast', 'concat', 'sums', 'assign',
-    'fill_constant_batch_size_like', 'fill_constant', 'ones', 'zeros'
+    'create_tensor',
+    'create_parameter',
+    'cast',
+    'concat',
+    'sums',
+    'assign',
+    'fill_constant_batch_size_like',
+    'fill_constant',
+    'ones',
+    'zeros',
 ]
...
@@ -121,8 +121,10 @@ class ControlFlowGraph(object):
                 # and dtype_to_size[cache_dtype]
                 if x_dtype == cache_dtype:
                     print(
-                        "Hit Cache !!!! cache pool index is %d, var name is %s, cached var name is %s, var shape is %s "
-                        %
+                        ("Hit Cache !!!! cache pool index "
+                         "is %d, var name is %s, "
+                         "cached var name is %s, "
+                         "var shape is %s ") %
                         (index, x, cache_var, str(cache_shape)))
                     self.pool.pop(index)
                     _rename_arg_(
...
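The rewritten `print` relies on Python's compile-time concatenation of adjacent string literals. The same rule is what silently merged `'ParamAttr' 'DataFeeder'` in the `__init__.py` hunk above, before the comma fix noted there:

```python
# Adjacent string literals concatenate at compile time:
msg = ("Hit Cache !!!! cache pool index "
       "is %d") % 3
assert msg == "Hit Cache !!!! cache pool index is 3"

# The same rule turns a forgotten comma into one merged list entry:
assert ['ParamAttr' 'DataFeeder'] == ['ParamAttrDataFeeder']
```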
 import layers
 
-__all__ = ["simple_img_conv_pool", "sequence_conv_pool"]
+__all__ = [
+    "simple_img_conv_pool",
+    "sequence_conv_pool",
+]
 
 def simple_img_conv_pool(input,
...
@@ -8,7 +8,11 @@ import proto.framework_pb2 as framework_pb2
 from framework import OpProtoHolder, Variable, Program, Operator
 from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
 
-__all__ = ['deprecated', 'register_layer', 'autodoc']
+__all__ = [
+    'deprecated',
+    'register_layer',
+    'autodoc',
+]
 
 def _convert_(name):
@@ -80,11 +84,10 @@ def _generate_doc_string_(op_proto):
 
 def register_layer(op_type):
-    """
-    Register an Python layer for an Operator
+    """Register the Python layer for an Operator.
 
     Args:
-        op_type: The name of the operator to be created
+        op_type: The name of the operator to be created.
 
     This function takes in the operator type (sigmoid, mean, average, etc.) and
     creates the operator functionality.
 
@@ -98,16 +101,16 @@ def register_layer(op_type):
     if len(not_intermediate_outputs) != 1:
-        raise ValueError("Only one non intermediate output operator can be",
-                         "automatically generated")
+        # comma removed so the two literals concatenate into one message
+        raise ValueError("Only one non intermediate output operator can be "
+                         "automatically generated.")
 
     if not_intermediate_outputs[0].duplicable:
         raise ValueError(
-            "Only non duplicable op can be automatically generated")
+            "Only non duplicable op can be automatically generated.")
 
     for output in intermediate_outputs:
         if output.duplicable:
-            raise ValueError("The op can be automatically generated only when ",
-                             "all intermediate ops are not duplicable")
+            # same fix: one concatenated message, not two arguments
+            raise ValueError("The op can be automatically generated only when "
+                             "all intermediate ops are not duplicable.")
 
     o_name = not_intermediate_outputs[0].name
     intermediate_output_names = [output.name for output in intermediate_outputs]
...
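A hypothetical call site for `register_layer`, to make the contract concrete. The keyword name follows `_convert_`, which lowercases the operator's proto input names (e.g. `X` becomes `x`); `some_var` is illustrative, not from the commit:

```python
# Hypothetical usage sketch (assumes a fluid program/scope already exists):
relu = register_layer('relu')  # build the Python wrapper for the relu op
y = relu(x=some_var)           # some_var: a fluid Variable created elsewhere
```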
 import framework
 
-__all__ = ['append_regularization_ops', 'L1Decay', 'L2Decay']
+__all__ = [
+    'append_regularization_ops',
+    'L1Decay',
+    'L2Decay',
+]
 
 def append_regularization_ops(parameters_and_grads, regularization=None):
...