diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index e314f6966a564fc42d4603b20fe7b1963f9d4d57..7d067b6347844e7a8c87f83304b311672c61a237 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -96,6 +96,7 @@ def scope_guard(scope):

             import paddle
             import numpy
+            paddle.enable_static()

             new_scope = paddle.static.Scope()
             with paddle.static.scope_guard(new_scope):
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index e1a7bddbfae439da5d79dffc66b942c679a25b7d..2c84aa482d787b3212c91b1bc56e2e839c9ba394 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -13511,14 +13511,14 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
     :api_attr: Static Graph

     This OP is used to register customized Python OP to Paddle. The design
-    principe of py_func is that LodTensor and numpy array can be converted to each
+    principe of py_func is that Tensor and numpy array can be converted to each
     other easily. So you can use Python and numpy API to register a python OP.

     The forward function of the registered OP is ``func`` and the backward function
     of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
     call ``backward_func`` at backward runtime(if ``backward_func`` is not None).
-    ``x`` is the input of ``func``, whose type must be LoDTensor; ``out`` is
-    the output of ``func``, whose type can be either LoDTensor or numpy array.
+    ``x`` is the input of ``func``, whose type must be Tensor; ``out`` is
+    the output of ``func``, whose type can be either Tensor or numpy array.

     The input of the backward function ``backward_func`` is ``x``, ``out`` and
     the gradient of ``out``. If some variables of ``out`` have no gradient, the
@@ -13536,14 +13536,14 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
         func (callable): The forward function of the registered OP. When the network
             is running, the forward output ``out`` will be calculated according to this
             function and the forward input ``x``. In ``func`` , it's suggested that we
-            actively convert LoDTensor into a numpy array, so that we can use Python and
+            actively convert Tensor into a numpy array, so that we can use Python and
             numpy API arbitrarily. If not, some operations of numpy may not be compatible.
         x (Variable|tuple(Variale)|list[Variale]): The input of the forward function ``func``.
-            It can be Variable|tuple(Variale)|list[Variale], where Variable is LoDTensor or
+            It can be Variable|tuple(Variale)|list[Variale], where Variable is Tensor or
            Tenosor. In addition, Multiple Variable should be passed in the form of tuple(Variale)
            or list[Variale].
         out (Variable|tuple(Variale)|list[Variale]): The output of the forward function ``func``,
-            it can be Variable|tuple(Variale)|list[Variale], where Variable can be either LoDTensor
+            it can be Variable|tuple(Variale)|list[Variale], where Variable can be either Tensor
            or numpy array. Since Paddle cannot automatically infer the shape and type of
            ``out``, you must create ``out`` in advance.
         backward_func (callable, optional): The backward function of the registered OP.
@@ -13567,13 +13567,15 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
            import paddle
            import six

-            # Creates a forward function, LodTensor can be input directly without
+            paddle.enable_static()
+
+            # Creates a forward function, Tensor can be input directly without
            # being converted into numpy array.
            def tanh(x):
                return np.tanh(x)

            # Skip x in backward function and return the gradient of x
-            # LodTensor must be actively converted to numpy array, otherwise,
+            # Tensor must be actively converted to numpy array, otherwise,
            # operations such as +/- can't be used.
            def tanh_grad(y, dy):
                return np.array(dy) * (1 - np.square(np.array(y)))
@@ -13598,7 +13600,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
                out=new_hidden,
                backward_func=tanh_grad,
                skip_vars_in_backward_input=hidden)
-            # User-defined debug functions that print out the input LodTensor
+            # User-defined debug functions that print out the input Tensor
            paddle.static.nn.py_func(func=debug_func, x=hidden, out=None)

            prediction = paddle.static.nn.fc(hidden, size=10, act='softmax')
@@ -13606,7 +13608,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
                return paddle.mean(loss)

            # example 2:
-            # This example shows how to turn LoDTensor into numpy array and
+            # This example shows how to turn Tensor into numpy array and
            # use numpy API to register an Python OP
            import paddle
            import numpy as np
@@ -13614,7 +13616,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
            paddle.enable_static()

            def element_wise_add(x, y):
-                # LodTensor must be actively converted to numpy array, otherwise,
+                # Tensor must be actively converted to numpy array, otherwise,
                # numpy.shape can't be used.
                x = np.array(x)
                y = np.array(y)
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 58d015c89dd7721682384d2a9290aed1e16d15e6..c633f7022d75e1c352f7ae1f51dc324064359e31 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -104,6 +104,7 @@ def create_parameter(shape,
        .. code-block:: python

            import paddle
+            paddle.enable_static()
            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
@@ -161,6 +162,7 @@ def create_global_var(shape,
        .. code-block:: python

            import paddle
+            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                       persistable=True, force_cpu=True, name='new_var')
    """
diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py
index 17a09fb6c0c3b29856e1b12c5e37bbedfbacfac0..3cdc5913946cd0b6cc2ef95ce0ff6fed74490e6a 100644
--- a/python/paddle/fluid/param_attr.py
+++ b/python/paddle/fluid/param_attr.py
@@ -62,13 +62,13 @@ class ParamAttr(object):
        .. code-block:: python

            import paddle
+            paddle.enable_static()

            w_param_attrs = paddle.ParamAttr(name="fc_weight",
                                             learning_rate=0.5,
                                             regularizer=paddle.regularizer.L2Decay(1.0),
                                             trainable=True)
            print(w_param_attrs.name) # "fc_weight"
-            paddle.enable_static()
            x = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
            y_predict = paddle.static.nn.fc(input=x, size=10, param_attr=w_param_attrs)
    """
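Not part of the patch: the sketch below pulls the pieces of the ``py_func`` docstring ("example 2") into one end-to-end script, showing why the docstring insists that the output variable be created in advance and that inputs be converted to numpy arrays inside the forward function. The ``create_tmp_var`` helper, the shapes, and the feed values are illustrative assumptions, not something the patch prescribes.

```python
import numpy as np
import paddle

paddle.enable_static()

def element_wise_add(x, y):
    # Inputs arrive as Tensors; convert to numpy arrays before using numpy ops.
    return np.array(x) + np.array(y)

def create_tmp_var(name, dtype, shape):
    # py_func cannot infer the shape/dtype of its output, so the output
    # variable must be created ahead of time (hypothetical helper).
    return paddle.static.default_main_program().current_block().create_var(
        name=name, dtype=dtype, shape=shape)

x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')
out = create_tmp_var('out', 'float32', [2, 3])
paddle.static.nn.py_func(func=element_wise_add, x=[x, y], out=out)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
res, = exe.run(feed={'x': np.ones((2, 3), np.float32),
                     'y': np.ones((2, 3), np.float32)},
               fetch_list=[out])
print(res)  # expected: a 2x3 array of 2.0
```

The ``paddle.enable_static()`` call at the top is exactly what this patch adds to each docstring example: these APIs live under the static graph, and since Paddle 2.0 defaults to dynamic mode, the examples fail without it.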