Commit 2d669443 authored by jiweibo

update 2.0 api.

Parent d014e29f
......@@ -94,12 +94,12 @@ def scope_guard(scope):
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
new_scope = paddle.static.Scope()
with paddle.static.scope_guard(new_scope):
paddle.static.global_scope().var("data").get_tensor().set(paddle.ones((2, 2)), paddle.CPUPlace())
numpy.array(new_scope.find_var("data").get_tensor())
"""
......
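For reference, a self-contained sketch of how the updated `scope_guard` snippet runs under the 2.0 API. `paddle.enable_static()` is added here as an assumption for the static-graph context, and `numpy.ones` is used because `Tensor.set()` takes a numpy array:

```python
import numpy
import paddle

paddle.enable_static()

# Create a new scope and make it the active scope inside the guard.
new_scope = paddle.static.Scope()
with paddle.static.scope_guard(new_scope):
    # Tensor.set() expects a numpy array and a place.
    paddle.static.global_scope().var("data").get_tensor().set(
        numpy.ones((2, 2)), paddle.CPUPlace())

# The variable written inside the guard is visible through new_scope.
print(numpy.array(new_scope.find_var("data").get_tensor()))
```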
......@@ -13510,7 +13510,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
"""
:api_attr: Static Graph
This OP is used to register customized Python OP to Paddle Fluid. The design
This OP is used to register customized Python OP to Paddle. The design
principle of py_func is that LodTensor and numpy array can be converted to each
other easily. So you can use Python and numpy API to register a python OP.
......@@ -13564,7 +13564,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
.. code-block:: python
# example 1:
import paddle.fluid as fluid
import paddle
import six
# Creates a forward function, LodTensor can be input directly without
......@@ -13583,34 +13583,36 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
print(x)
def create_tmp_var(name, dtype, shape):
return fluid.default_main_program().current_block().create_var(
return paddle.static.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def simple_net(img, label):
hidden = img
for idx in six.moves.range(4):
hidden = fluid.layers.fc(hidden, size=200)
hidden = paddle.static.nn.fc(hidden, size=200)
new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
dtype=hidden.dtype, shape=hidden.shape)
# User-defined forward and backward
hidden = fluid.layers.py_func(func=tanh, x=hidden,
hidden = paddle.static.nn.py_func(func=tanh, x=hidden,
out=new_hidden, backward_func=tanh_grad,
skip_vars_in_backward_input=hidden)
# User-defined debug functions that print out the input LodTensor
fluid.layers.py_func(func=debug_func, x=hidden, out=None)
paddle.static.nn.py_func(func=debug_func, x=hidden, out=None)
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
return fluid.layers.mean(loss)
prediction = paddle.static.nn.fc(hidden, size=10, act='softmax')
loss = paddle.static.nn.cross_entropy(input=prediction, label=label)
return paddle.mean(loss)
# example 2:
# This example shows how to turn LoDTensor into numpy array and
# use numpy API to register a Python OP
import paddle.fluid as fluid
import paddle
import numpy as np
paddle.enable_static()
def element_wise_add(x, y):
# LodTensor must be actively converted to numpy array, otherwise,
# numpy.shape can't be used.
......@@ -13628,24 +13630,24 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
return result
def create_tmp_var(name, dtype, shape):
return fluid.default_main_program().current_block().create_var(
return paddle.static.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def py_func_demo():
start_program = fluid.default_startup_program()
main_program = fluid.default_main_program()
start_program = paddle.static.default_startup_program()
main_program = paddle.static.default_main_program()
# Input of the forward function
x = fluid.data(name='x', shape=[2,3], dtype='int32')
y = fluid.data(name='y', shape=[2,3], dtype='int32')
x = paddle.data(name='x', shape=[2,3], dtype='int32')
y = paddle.data(name='y', shape=[2,3], dtype='int32')
# Output of the forward function, name/dtype/shape must be specified
output = create_tmp_var('output','int32', [3,1])
# Multiple Variables should be passed in the form of tuple(Variable) or list[Variable]
fluid.layers.py_func(func=element_wise_add, x=[x,y], out=output)
paddle.static.nn.py_func(func=element_wise_add, x=[x,y], out=output)
exe=fluid.Executor(fluid.CPUPlace())
exe=paddle.static.Executor(paddle.CPUPlace())
exe.run(start_program)
# Feed numpy array to main_program
......
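A compact, runnable version of "example 2" with the names this commit wires up (`paddle.static.nn.py_func`, `paddle.static.Executor`). `paddle.static.data` is used for the inputs, and the `[2, 3]` output shape is an assumption chosen to match the element-wise add:

```python
import numpy as np
import paddle

paddle.enable_static()

def element_wise_add(x, y):
    # Inputs arrive as LoDTensor; convert to numpy arrays before using numpy APIs.
    return np.array(x) + np.array(y)

def create_tmp_var(name, dtype, shape):
    # Output variables of py_func must be created explicitly.
    return paddle.static.default_main_program().current_block().create_var(
        name=name, dtype=dtype, shape=shape)

x = paddle.static.data(name='x', shape=[2, 3], dtype='int32')
y = paddle.static.data(name='y', shape=[2, 3], dtype='int32')
output = create_tmp_var('output', 'int32', [2, 3])

# Multiple inputs are passed as a list; element_wise_add runs when the program executes.
paddle.static.nn.py_func(func=element_wise_add, x=[x, y], out=output)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())

a = np.random.randint(1, 10, size=[2, 3]).astype('int32')
b = np.random.randint(1, 10, size=[2, 3]).astype('int32')
out, = exe.run(paddle.static.default_main_program(),
               feed={'x': a, 'y': b},
               fetch_list=[output])
print(out)
```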
......@@ -103,9 +103,8 @@ def create_parameter(shape,
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
W = layers.create_parameter(shape=[784, 200], dtype='float32')
import paddle
W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
"""
check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
for item in shape:
......@@ -161,9 +160,8 @@ def create_global_var(shape,
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
import paddle
var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
persistable=True, force_cpu=True, name='new_var')
"""
check_type(shape, 'shape', (list, tuple, numpy.ndarray),
......
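A minimal sketch that exercises both updated examples through the `paddle.static` aliases this commit exports; `paddle.enable_static()` is assumed because both calls create static-graph variables:

```python
import paddle

paddle.enable_static()

# Learnable parameter added to the default startup/main programs.
W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')

# Persistable global variable, forced onto CPU memory.
var = paddle.static.create_global_var(shape=[2, 3], value=1.0, dtype='float32',
                                      persistable=True, force_cpu=True,
                                      name='new_var')
print(W.shape, var.name)
```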
......@@ -61,15 +61,16 @@ class ParamAttr(object):
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
w_param_attrs = fluid.ParamAttr(name="fc_weight",
learning_rate=0.5,
regularizer=fluid.regularizer.L2Decay(1.0),
trainable=True)
w_param_attrs = paddle.ParamAttr(name="fc_weight",
learning_rate=0.5,
regularizer=paddle.regularizer.L2Decay(1.0),
trainable=True)
print(w_param_attrs.name) # "fc_weight"
x = fluid.data(name='X', shape=[None, 1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs)
paddle.enable_static()
x = paddle.data(name='X', shape=[None, 1], dtype='float32')
y_predict = paddle.static.nn.fc(input=x, size=10, param_attr=w_param_attrs)
"""
def __init__(self,
......@@ -202,7 +203,7 @@ class ParamAttr(object):
class WeightNormParamAttr(ParamAttr):
"""
:api_attr: Static Graph
:api_attr: Static Graph
Note:
Please use 'paddle.nn.utils.weight_norm' in dygraph mode.
......
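A runnable sketch of the updated `ParamAttr` example. It keeps the fluid-style `input=`/`param_attr=` keywords of `paddle.static.nn.fc` exactly as the docstring above uses them, and swaps `paddle.data` for `paddle.static.data`, which the `paddle/static/__init__.py` hunk further down shows being imported:

```python
import paddle

paddle.enable_static()

w_param_attrs = paddle.ParamAttr(name="fc_weight",
                                 learning_rate=0.5,
                                 regularizer=paddle.regularizer.L2Decay(1.0),
                                 trainable=True)
print(w_param_attrs.name)  # "fc_weight"

x = paddle.static.data(name='X', shape=[None, 1], dtype='float32')
y_predict = paddle.static.nn.fc(input=x, size=10, param_attr=w_param_attrs)
```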
......@@ -23,6 +23,7 @@ __all__ = [
]
from . import nn
from .fluid import Scope #DEFINE_ALIAS
from .input import data #DEFINE_ALIAS
from .input import InputSpec #DEFINE_ALIAS
from ..fluid.executor import Executor #DEFINE_ALIAS
......@@ -48,3 +49,5 @@ from ..fluid.io import save_inference_model #DEFINE_ALIAS
from ..fluid.io import load_inference_model #DEFINE_ALIAS
from ..fluid.io import load_program_state #DEFINE_ALIAS
from ..fluid.io import set_program_state #DEFINE_ALIAS
from ..fluid.layers import create_parameter #DEFINE_ALIAS
from ..fluid.layers import create_global_var #DEFINE_ALIAS
......@@ -32,6 +32,7 @@ __all__ = [
'multi_box_head',
'nce',
'prelu',
'py_func',
'row_conv',
'spectral_norm',
]
......@@ -54,6 +55,7 @@ from ...fluid.layers import layer_norm #DEFINE_ALIAS
from ...fluid.layers import multi_box_head #DEFINE_ALIAS
from ...fluid.layers import nce #DEFINE_ALIAS
from ...fluid.layers import prelu #DEFINE_ALIAS
from ...fluid.layers import py_func #DEFINE_ALIAS
from ...fluid.layers import row_conv #DEFINE_ALIAS
from ...fluid.layers import spectral_norm #DEFINE_ALIAS
......
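As an illustrative sanity check, the aliases wired up in these two `__init__.py` hunks should resolve after a plain `import paddle`:

```python
import paddle

# Each name below is a DEFINE_ALIAS re-export of the fluid implementation.
print(paddle.static.create_parameter)
print(paddle.static.create_global_var)
print(paddle.static.nn.py_func)
```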
......@@ -279,7 +279,6 @@
"thresholded_relu",
"group_norm",
"random_crop",
"py_func",
"row_conv",
"hard_shrink",
"ssd_loss",
......