Unverified commit d05058d2, authored by chentianyu03 and committed by GitHub

Remove and reorganize the alias of APIs (#27717)

* modify cond while_loop to paddle.static.nn.cond

* modify crop_tensor to paddle.crop

* modify Variable to paddle.static.Variable

* remove nn.beam_search, nn.beam_search_decode, nn.gather_tree

* remove bpr_loss, center_loss, rank_loss, smooth_l1, teacher_student_sigmoid_loss, edit_distance, sampled_softmax_with_cross_entropy in nn.functional

* remove apis in nn.functional.learn_rate.py

* remove pool2d, pool3d, adaptive_pool2d, adaptive_pool3d in nn.functional

* remove apis in nn.functional.vision

* remove erf, soft_relu in nn.functional.activation

* remove apis in nn.functional.extension

* remove nn.functional.rnn

* remove hash from nn.functional.lod

* remove row_conv from nn.functional.extension

* remove one_hot, pad2d, pad_constant_like from nn.functional.common

* remove nn.gather_tree, nn.BilinearTensorProduct, nn.Pool2D, nn.Pad2D

* remove apis from optimizer.__init__

* remove tensor.creation.fill_constant

* remove elementwise_mul in nn.functional.common and modify to paddle.multiply

* remove tensor.stat.reduce_mean

* remove reduce_all, reduce_any in tensor.logic

* remove apis in tensor.math

* remove apis in tensor.__init__

* remove has_inf, has_nan in tensor.search

* remove apis in framework.__init__

* remove apis in paddle.__init__

* remove apis in nn.functional.__init__

* modify removed alias apis to raw api in doc and unittests

* fix remove grid_sample bug

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* delete alias api relations in doc

* reserve paddle.compat, paddle.sysconfig

* remove unittest for paddle.reduce_all, paddle.reduce_any

* modify removed alias apis to raw api in doc and unittests

* recover paddle.save and paddle.load

* resolve conflicts

* fix sample code missing paddle.enable_static() bug

* fix sample code missing paddle.enable_static() bug

* fix to_string sample code error
Parent 6e5034e2
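In practice the migration is mechanical: removed paddle.* aliases fall back to the underlying fluid API. A minimal before/after sketch (illustrative, not part of the commit):

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    m = paddle.fluid.layers.reduce_mean(x)  # was: paddle.reduce_mean(x)
    s = paddle.fluid.layers.reduce_sum(x)   # was: paddle.reduce_sum(x)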
@@ -712,7 +712,7 @@ void BindImperative(py::module *m_ptr) {
 tmp.stop_gradient=False
 inputs.append(tmp)
 ret = paddle.sums(inputs2)
-loss = paddle.reduce_sum(ret)
+loss = paddle.fluid.layers.reduce_sum(ret)
 loss.backward()
 print("Before clear_gradient {}".format(loss.grad))
 loss.clear_gradient()
...
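For reference, a self-contained version of the clear_gradient example above (a sketch assuming the 2.0 dygraph API; `inputs2` in the original snippet reads like a typo for `inputs`):

    import numpy as np
    import paddle

    inputs = []
    for _ in range(10):
        tmp = paddle.to_tensor(np.ones([2, 2], np.float32))
        tmp.stop_gradient = False
        inputs.append(tmp)
    ret = paddle.sums(inputs)
    loss = paddle.fluid.layers.reduce_sum(ret)
    loss.backward()
    print("Before clear_gradient {}".format(loss.grad))
    loss.clear_gradient()
    print("After clear_gradient {}".format(loss.grad))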
@@ -59,10 +59,9 @@ from .tensor.random import bernoulli
 from .tensor.attribute import rank #DEFINE_ALIAS
 from .tensor.attribute import shape #DEFINE_ALIAS
 from .tensor.creation import to_tensor #DEFINE_ALIAS
-from .tensor.creation import crop_tensor #DEFINE_ALIAS
 from .tensor.creation import diag #DEFINE_ALIAS
 from .tensor.creation import eye #DEFINE_ALIAS
-from .tensor.creation import fill_constant #DEFINE_ALIAS
+# from .tensor.creation import fill_constant #DEFINE_ALIAS
 # from .tensor.creation import get_tensor_from_selected_rows #DEFINE_ALIAS
 from .tensor.creation import linspace #DEFINE_ALIAS
 from .tensor.creation import ones #DEFINE_ALIAS
@@ -103,8 +102,8 @@ from .tensor.logic import logical_not #DEFINE_ALIAS
 from .tensor.logic import logical_or #DEFINE_ALIAS
 from .tensor.logic import logical_xor #DEFINE_ALIAS
 from .tensor.logic import not_equal #DEFINE_ALIAS
-from .tensor.logic import reduce_all #DEFINE_ALIAS
-from .tensor.logic import reduce_any #DEFINE_ALIAS
+# from .tensor.logic import reduce_all #DEFINE_ALIAS
+# from .tensor.logic import reduce_any #DEFINE_ALIAS
 from .tensor.logic import allclose #DEFINE_ALIAS
 from .tensor.logic import equal_all #DEFINE_ALIAS
 # from .tensor.logic import isnan #DEFINE_ALIAS
@@ -144,12 +143,12 @@ from .tensor.math import ceil #DEFINE_ALIAS
 from .tensor.math import cos #DEFINE_ALIAS
 from .tensor.math import cosh #DEFINE_ALIAS
 from .tensor.math import cumsum #DEFINE_ALIAS
-from .tensor.math import elementwise_add #DEFINE_ALIAS
-from .tensor.math import elementwise_div #DEFINE_ALIAS
-from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
-from .tensor.math import elementwise_mod #DEFINE_ALIAS
-from .tensor.math import elementwise_pow #DEFINE_ALIAS
-from .tensor.math import elementwise_sub #DEFINE_ALIAS
+# from .tensor.math import elementwise_add #DEFINE_ALIAS
+# from .tensor.math import elementwise_div #DEFINE_ALIAS
+# from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
+# from .tensor.math import elementwise_mod #DEFINE_ALIAS
+# from .tensor.math import elementwise_pow #DEFINE_ALIAS
+# from .tensor.math import elementwise_sub #DEFINE_ALIAS
 from .tensor.math import exp #DEFINE_ALIAS
 from .tensor.math import floor #DEFINE_ALIAS
 from .tensor.math import increment #DEFINE_ALIAS
@@ -157,10 +156,10 @@ from .tensor.math import log #DEFINE_ALIAS
 from .tensor.math import multiplex #DEFINE_ALIAS
 from .tensor.math import pow #DEFINE_ALIAS
 from .tensor.math import reciprocal #DEFINE_ALIAS
-from .tensor.math import reduce_max #DEFINE_ALIAS
-from .tensor.math import reduce_min #DEFINE_ALIAS
-from .tensor.math import reduce_prod #DEFINE_ALIAS
-from .tensor.math import reduce_sum #DEFINE_ALIAS
+# from .tensor.math import reduce_max #DEFINE_ALIAS
+# from .tensor.math import reduce_min #DEFINE_ALIAS
+# from .tensor.math import reduce_prod #DEFINE_ALIAS
+# from .tensor.math import reduce_sum #DEFINE_ALIAS
 from .tensor.math import round #DEFINE_ALIAS
 from .tensor.math import rsqrt #DEFINE_ALIAS
 from .tensor.math import scale #DEFINE_ALIAS
@@ -190,7 +189,7 @@ from .tensor.math import logsumexp #DEFINE_ALIAS
 from .tensor.math import inverse #DEFINE_ALIAS
 from .tensor.math import log1p #DEFINE_ALIAS
 from .tensor.math import erf #DEFINE_ALIAS
-from .tensor.math import addcmul #DEFINE_ALIAS
+# from .tensor.math import addcmul #DEFINE_ALIAS
 from .tensor.math import addmm #DEFINE_ALIAS
 from .tensor.math import clip #DEFINE_ALIAS
 from .tensor.math import trace #DEFINE_ALIAS
@@ -210,8 +209,8 @@ from .tensor.random import randperm #DEFINE_ALIAS
 from .tensor.search import argmax #DEFINE_ALIAS
 from .tensor.search import argmin #DEFINE_ALIAS
 from .tensor.search import argsort #DEFINE_ALIAS
-from .tensor.search import has_inf #DEFINE_ALIAS
-from .tensor.search import has_nan #DEFINE_ALIAS
+# from .tensor.search import has_inf #DEFINE_ALIAS
+# from .tensor.search import has_nan #DEFINE_ALIAS
 from .tensor.search import masked_select #DEFINE_ALIAS
 from .tensor.search import topk #DEFINE_ALIAS
 from .tensor.search import where #DEFINE_ALIAS
@@ -224,9 +223,8 @@ from .tensor.to_string import set_printoptions
 from .framework.random import manual_seed #DEFINE_ALIAS
 from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
 from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
-from .framework import Variable #DEFINE_ALIAS
 from .framework import ParamAttr #DEFINE_ALIAS
-from .framework import create_global_var #DEFINE_ALIAS
+# from .framework import create_global_var #DEFINE_ALIAS
 from .framework import create_parameter #DEFINE_ALIAS
 from .framework import CPUPlace #DEFINE_ALIAS
 from .framework import CUDAPlace #DEFINE_ALIAS
@@ -243,10 +241,10 @@ from .framework import get_default_dtype #DEFINE_ALIAS
 from .tensor.search import index_sample #DEFINE_ALIAS
 from .tensor.stat import mean #DEFINE_ALIAS
-from .tensor.stat import reduce_mean #DEFINE_ALIAS
+# from .tensor.stat import reduce_mean #DEFINE_ALIAS
 from .tensor.stat import std #DEFINE_ALIAS
 from .tensor.stat import var #DEFINE_ALIAS
-from .fluid.data import data
+# from .fluid.data import data
 from .tensor.stat import numel #DEFINE_ALIAS
 from .device import get_cudnn_version
 from .device import set_device
@@ -262,6 +260,8 @@ from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS
 from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS
 from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS
 from .fluid.dygraph.base import no_grad_ as no_grad #DEFINE_ALIAS
+from .fluid.layers import crop_tensor as crop #DEFINE_ALIAS
 from . import jit
 from . import static
...
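The last hunk re-exports fluid.layers.crop_tensor as paddle.crop; a usage sketch (assuming the 2.0 dygraph API):

    import paddle

    x = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    out = paddle.crop(x, shape=[2, 2], offsets=[0, 1])  # was: the crop_tensor alias
    # out: [[2., 3.], [5., 6.]]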
@@ -56,7 +56,7 @@ class GradScaler(AmpScaler):
         data = paddle.rand([10, 3, 32, 32])
         with paddle.amp.auto_cast():
             conv = model(data)
-            loss = paddle.reduce_mean(conv)
+            loss = paddle.fluid.layers.reduce_mean(conv)
             scaled = scaler.scale(loss) # scale the loss
             scaled.backward() # do backward
             scaler.minimize(optimizer, scaled) # update parameters
@@ -96,7 +96,7 @@ class GradScaler(AmpScaler):
         data = paddle.rand([10, 3, 32, 32])
         with paddle.amp.auto_cast():
             conv = model(data)
-            loss = paddle.reduce_mean(conv)
+            loss = paddle.fluid.layers.reduce_mean(conv)
             scaled = scaler.scale(loss) # scale the loss
             scaled.backward() # do backward
             scaler.minimize(optimizer, scaled) # update parameters
@@ -128,7 +128,7 @@ class GradScaler(AmpScaler):
         data = paddle.rand([10, 3, 32, 32])
         with paddle.amp.auto_cast():
             conv = model(data)
-            loss = paddle.reduce_mean(conv)
+            loss = paddle.fluid.layers.reduce_mean(conv)
             scaled = scaler.scale(loss) # scale the loss
             scaled.backward() # do backward
             scaler.minimize(optimizer, scaled) # update parameters
...
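A self-contained version of the GradScaler example being updated here (a sketch; AMP assumes a CUDA build and device):

    import paddle

    model = paddle.nn.Conv2D(3, 2, 3)
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])
    with paddle.amp.auto_cast():
        conv = model(data)
        loss = paddle.fluid.layers.reduce_mean(conv)
        scaled = scaler.scale(loss)         # scale the loss
        scaled.backward()                   # do backward
        scaler.minimize(optimizer, scaled)  # update parameters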
@@ -439,7 +439,7 @@ def barrier(group=0):
         paddle.distributed.barrier()
     """
     op_type = 'barrier'
-    temp = paddle.fill_constant([1], dtype="int32", value="1")
+    temp = fill_constant([1], dtype="int32", value="1")
     if in_dygraph_mode():
         return core.ops.barrier(temp, temp, 'ring_id', group)
     if not isinstance(group, int):
...
@@ -25,9 +25,9 @@ from .fluid.layers import control_flow
 from .fluid.layers import tensor
 from .fluid.layers import ops
 from .fluid.layers import nn
+from .fluid.layers import elementwise_mul, elementwise_div, elementwise_add, elementwise_sub
 from .fluid import core
 from .fluid.framework import in_dygraph_mode
-from .tensor.math import elementwise_mul, elementwise_div, elementwise_add, elementwise_sub
 from .tensor import arange, gather_nd, concat, multinomial
 import math
 import numpy as np
...
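As the commit message notes, the public 2.0 replacement for the removed elementwise_mul alias is paddle.multiply; a quick sketch:

    import paddle

    x = paddle.to_tensor([1.0, 2.0])
    y = paddle.to_tensor([3.0, 4.0])
    z = paddle.multiply(x, y)  # was: paddle.elementwise_mul(x, y); yields [3., 8.]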
@@ -480,7 +480,7 @@ def grad(outputs,
 paddle.disable_static()

 def test_dygraph_grad(grad_outputs=None):
-    x = paddle.fill_constant(shape=[1], value=2.0, dtype='float32')
+    x = paddle.fluid.layers.fill_constant(shape=[1], value=2.0, dtype='float32')
     x.stop_gradient = False
     y1 = x * x
@@ -503,7 +503,7 @@ def grad(outputs,
     return dx.numpy()

-grad_value = paddle.fill_constant(shape=[1], value=4.0, dtype='float32')
+grad_value = paddle.fluid.layers.fill_constant(shape=[1], value=4.0, dtype='float32')

 # dy1 = [1], dy2 = [1]
 print(test_dygraph_grad(None)) # [7.]
@@ -515,7 +515,7 @@ def grad(outputs,
 print(test_dygraph_grad([grad_value, None])) # [19.]

 # dy1 = [3], dy2 = [4]
-grad_y1 = paddle.fill_constant(shape=[1], value=3.0, dtype='float32')
+grad_y1 = paddle.fluid.layers.fill_constant(shape=[1], value=3.0, dtype='float32')
 print(test_dygraph_grad([grad_y1, grad_value])) # [24.]
 '''
...
@@ -87,7 +87,7 @@ def create_static_variable_gast_node(name):
 def create_fill_constant_node(name, value):
-    func_code = "{} = paddle.fill_constant(shape=[1], ".format(name)
+    func_code = "{} = paddle.fluid.layers.fill_constant(shape=[1], ".format(name)
     if isinstance(value, bool):
         func_code += "dtype='bool', value={})".format(value)
         return gast.parse(func_code).body[0]
...
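Standalone, the rewritten helper's output for a bool constant looks like this (a sketch using gast directly; the variable name x is illustrative):

    import gast

    # create_fill_constant_node("x", True) builds and parses this statement:
    func_code = "x = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)"
    node = gast.parse(func_code).body[0]  # a gast.Assign node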
@@ -702,9 +702,6 @@ class Conv3DTranspose(layers.Layer):
 class Pool2D(layers.Layer):
     """
-    :alias_main: paddle.nn.Pool2D
-    :alias: paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D
-    :old_api: paddle.fluid.dygraph.Pool2D

     This interface is used to construct a callable object of the ``Pool2D`` class.
     For more details, refer to code examples.
@@ -2354,9 +2351,6 @@ class PRelu(layers.Layer):
 class BilinearTensorProduct(layers.Layer):
     """
-    :alias_main: paddle.nn.BilinearTensorProduct
-    :alias: paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct
-    :old_api: paddle.fluid.dygraph.BilinearTensorProduct

     **Add Bilinear Tensor Product Layer**
...
@@ -163,7 +163,7 @@ def monkey_patch_varbase():
 tmp.stop_gradient=False
 inputs.append(tmp)
 ret = paddle.sums(inputs)
-loss = paddle.reduce_sum(ret)
+loss = paddle.fluid.layers.reduce_sum(ret)
 loss.backward()
 """
...
@@ -543,7 +543,7 @@ def name_scope(prefix=None):
 import paddle
 paddle.enable_static()
 with paddle.static.name_scope("s1"):
-    a = paddle.data(name='data', shape=[None, 1], dtype='int32')
+    a = paddle.fluid.data(name='data', shape=[None, 1], dtype='int32')
     b = a + 1
     with paddle.static.name_scope("s2"):
         c = b * 1
@@ -1193,7 +1193,7 @@ class Variable(object):
 tmp.stop_gradient=False
 inputs.append(tmp)
 ret = paddle.sums(inputs)
-loss = paddle.reduce_sum(ret)
+loss = paddle.fluid.layers.reduce_sum(ret)
 loss.backward()
 """
@@ -1343,7 +1343,9 @@ class Variable(object):
 .. code-block:: python

     import paddle.fluid as fluid
+    import paddle
+    paddle.enable_static()
     cur_program = fluid.Program()
     cur_block = cur_program.current_block()
     new_variable = cur_block.create_var(name="X",
@@ -5355,8 +5357,8 @@ def default_startup_program():
 main_program = paddle.static.Program()
 startup_program = paddle.static.Program()
 with paddle.static.program_guard(main_program=main_program, startup_program=startup_program):
-    x = paddle.data(name="x", shape=[-1, 784], dtype='float32')
-    y = paddle.data(name="y", shape=[-1, 1], dtype='int32')
+    x = paddle.fluid.data(name="x", shape=[-1, 784], dtype='float32')
+    y = paddle.fluid.data(name="y", shape=[-1, 1], dtype='int32')
     z = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")

 print("main program is: {}".format(paddle.static.default_main_program()))
@@ -5370,7 +5372,7 @@ def default_main_program():
 This API can be used to get ``default main program`` which store the
 descriptions of Ops and tensors.

-For example ``z = paddle.elementwise_add(x, y)`` will create a new ``elementwise_add``
+For example ``z = paddle.fluid.layers.elementwise_add(x, y)`` will create a new ``elementwise_add``
 Op and a new ``z`` tensor, and they will be recorded in ``default main program`` .

 The ``default main program`` is the default value for ``Program`` parameter in
@@ -5389,15 +5391,15 @@ def default_main_program():
 paddle.enable_static()

 # Sample Network:
-data = paddle.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
-label = paddle.data(name='label', shape=[None, 1], dtype='int64')
+data = paddle.fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
+label = paddle.fluid.data(name='label', shape=[None, 1], dtype='int64')
 conv1 = paddle.static.nn.conv2d(data, 4, 5, 1, act=None)
 bn1 = paddle.static.nn.batch_norm(conv1, act='relu')
-pool1 = paddle.nn.functional.pool2d(bn1, 2, 'max', 2)
+pool1 = paddle.fluid.layers.pool2d(bn1, 2, 'max', 2)
 conv2 = paddle.static.nn.conv2d(pool1, 16, 5, 1, act=None)
 bn2 = paddle.static.nn.batch_norm(conv2, act='relu')
-pool2 = paddle.nn.functional.pool2d(bn2, 2, 'max', 2)
+pool2 = paddle.fluid.layers.pool2d(bn2, 2, 'max', 2)
 fc1 = paddle.static.nn.fc(x=pool2, size=50, activation='relu')
 fc2 = paddle.static.nn.fc(x=fc1, size=102, activation='softmax')
...
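To see what the default main program records after this change, a runnable sketch (tensor names are illustrative):

    import paddle

    paddle.enable_static()
    x = paddle.fluid.data(name="x", shape=[2, 2], dtype="float32")
    y = paddle.fluid.layers.elementwise_add(x, x)  # recorded as an elementwise_add Op
    print(paddle.static.default_main_program())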
@@ -1110,9 +1110,6 @@ def assign_skip_lod_tensor_array(input, output):
 def while_loop(cond, body, loop_vars, is_test=False, name=None):
     """
     :api_attr: Static Graph
-    :alias_main: paddle.nn.while_loop
-    :alias: paddle.nn.while_loop,paddle.nn.control_flow.while_loop
-    :old_api: paddle.fluid.layers.while_loop

     while_loop is one of the control flows. Repeats while_loop `body` until `cond` returns False.
@@ -1151,6 +1148,9 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
     import paddle.fluid as fluid
     import paddle.fluid.layers as layers
+    import paddle
+    paddle.enable_static()

     def cond(i, ten):
         return i < ten
@@ -2506,21 +2506,21 @@ def case(pred_fn_pairs, default=None, name=None):
     paddle.enable_static()

     def fn_1():
-        return paddle.fill_constant(shape=[1, 2], dtype='float32', value=1)
+        return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

     def fn_2():
-        return paddle.fill_constant(shape=[2, 2], dtype='int32', value=2)
+        return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

     def fn_3():
-        return paddle.fill_constant(shape=[3], dtype='int32', value=3)
+        return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)

     main_program = paddle.static.default_startup_program()
     startup_program = paddle.static.default_main_program()
     with paddle.static.program_guard(main_program, startup_program):
-        x = paddle.fill_constant(shape=[1], dtype='float32', value=0.3)
-        y = paddle.fill_constant(shape=[1], dtype='float32', value=0.1)
-        z = paddle.fill_constant(shape=[1], dtype='float32', value=0.2)
+        x = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.3)
+        y = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.1)
+        z = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.2)

         pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
         pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
@@ -3626,19 +3626,19 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
     paddle.enable_static()

     def fn_1():
-        return paddle.fill_constant(shape=[1, 2], dtype='float32', value=1)
+        return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

     def fn_2():
-        return paddle.fill_constant(shape=[2, 2], dtype='int32', value=2)
+        return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

     def fn_3():
-        return paddle.fill_constant(shape=[3], dtype='int32', value=3)
+        return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)

     main_program = paddle.static.default_startup_program()
     startup_program = paddle.static.default_main_program()
     with paddle.static.program_guard(main_program, startup_program):
-        index_1 = paddle.fill_constant(shape=[1], dtype='int32', value=1)
-        index_2 = paddle.fill_constant(shape=[1], dtype='int32', value=2)
+        index_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
+        index_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=2)

         out_1 = paddle.static.nn.switch_case(
             branch_index=index_1,
...
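A complete, runnable variant of the switch_case example (a sketch; it assumes list positions act as case numbers, so an index of 1 selects fn_2):

    import paddle

    paddle.enable_static()

    def fn_1():
        return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

    def fn_2():
        return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

    main_program = paddle.static.Program()
    startup_program = paddle.static.Program()
    with paddle.static.program_guard(main_program, startup_program):
        index = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
        out = paddle.static.nn.switch_case(branch_index=index, branch_fns=[fn_1, fn_2])
    exe = paddle.static.Executor(paddle.CPUPlace())
    res = exe.run(main_program, fetch_list=[out])  # res[0] is fn_2's 2x2 tensor of 2s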
@@ -629,9 +629,6 @@ def detection_output(loc,
                      nms_eta=1.0,
                      return_index=False):
     """
-    :alias_main: paddle.nn.functional.detection_output
-    :alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
-    :old_api: paddle.fluid.layers.detection_output

     Given the regression locations, classification confidences and prior boxes,
     calculate the detection outputs by performing following steps:
@@ -700,6 +697,9 @@ def detection_output(loc,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()

         pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
         pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
@@ -822,9 +822,6 @@ def box_coder(prior_box,
               name=None,
               axis=0):
     """
-    :alias_main: paddle.nn.functional.box_coder
-    :alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
-    :old_api: paddle.fluid.layers.box_coder

     **Box Coder Layer**
@@ -911,6 +908,8 @@ def box_coder(prior_box,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         # For encode
         prior_box_encode = fluid.data(name='prior_box_encode',
                                       shape=[512, 4],
@@ -1013,9 +1012,6 @@ def yolov3_loss(x,
                 name=None,
                 scale_x_y=1.):
     """
-    :alias_main: paddle.nn.functional.yolov3_loss
-    :alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
-    :old_api: paddle.fluid.layers.yolov3_loss

     ${comment}
@@ -1060,6 +1056,8 @@ def yolov3_loss(x,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
         gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
         gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
@@ -1140,9 +1138,6 @@ def yolo_box(x,
             name=None,
             scale_x_y=1.):
     """
-    :alias_main: paddle.nn.functional.yolo_box
-    :alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
-    :old_api: paddle.fluid.layers.yolo_box

     ${comment}
@@ -1175,6 +1170,8 @@ def yolo_box(x,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
         img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
         anchors = [10, 13, 16, 30, 33, 23]
@@ -1319,9 +1316,6 @@ def bipartite_match(dist_matrix,
                     dist_threshold=None,
                     name=None):
     """
-    :alias_main: paddle.nn.functional.bipartite_match
-    :alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
-    :old_api: paddle.fluid.layers.bipartite_match

     This operator implements a greedy bipartite matching algorithm, which is
     used to obtain the matching with the maximum distance based on the input
@@ -1413,9 +1407,6 @@ def target_assign(input,
                   mismatch_value=None,
                   name=None):
     """
-    :alias_main: paddle.nn.functional.target_assign
-    :alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
-    :old_api: paddle.fluid.layers.target_assign

     This operator can be, for given the target bounding boxes or labels,
     to assign classification and regression targets to each prediction as well as
@@ -1484,6 +1475,8 @@ def target_assign(input,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         x = fluid.data(
             name='x',
             shape=[4, 20, 4],
@@ -1778,9 +1771,6 @@ def prior_box(input,
              name=None,
              min_max_aspect_ratios_order=False):
     """
-    :alias_main: paddle.nn.functional.prior_box
-    :alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
-    :old_api: paddle.fluid.layers.prior_box

     This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
     Each position of the input produce N prior boxes, N is determined by
@@ -1832,6 +1822,8 @@ def prior_box(input,
         #declarative mode
         import paddle.fluid as fluid
         import numpy as np
+        import paddle
+        paddle.enable_static()
         input = fluid.data(name="input", shape=[None,3,6,9])
         image = fluid.data(name="image", shape=[None,3,9,12])
         box, var = fluid.layers.prior_box(
@@ -1939,10 +1931,6 @@ def density_prior_box(input,
                       flatten_to_2d=False,
                       name=None):
     """
-    :alias_main: paddle.nn.functional.density_prior_box
-    :alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
-    :old_api: paddle.fluid.layers.density_prior_box

     This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
     algorithm. Each position of the input produce N prior boxes, N is
@@ -2008,6 +1996,8 @@ def density_prior_box(input,
         import paddle.fluid as fluid
         import numpy as np
+        import paddle
+        paddle.enable_static()
         input = fluid.data(name="input", shape=[None,3,6,9])
         image = fluid.data(name="image", shape=[None,3,9,12])
@@ -2408,9 +2398,6 @@ def anchor_generator(input,
                      offset=0.5,
                      name=None):
     """
-    :alias_main: paddle.nn.functional.anchor_generator
-    :alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
-    :old_api: paddle.fluid.layers.anchor_generator

     **Anchor generator operator**
@@ -2457,6 +2444,9 @@ def anchor_generator(input,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()

         conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
         anchor, var = fluid.layers.anchor_generator(
             input=conv1,
@@ -2613,9 +2603,6 @@ def generate_proposal_labels(rpn_rois,
                              is_cls_agnostic=False,
                              is_cascade_rcnn=False):
     """
-    :alias_main: paddle.nn.functional.generate_proposal_labels
-    :alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
-    :old_api: paddle.fluid.layers.generate_proposal_labels

     **Generate Proposal Labels of Faster-RCNN**
@@ -2738,9 +2725,6 @@ def generate_proposal_labels(rpn_rois,
 def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
                          labels_int32, num_classes, resolution):
     """
-    :alias_main: paddle.nn.functional.generate_mask_labels
-    :alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
-    :old_api: paddle.fluid.layers.generate_mask_labels

     **Generate Mask Labels for Mask-RCNN**
@@ -2897,9 +2881,6 @@ def generate_proposals(scores,
                        return_rois_num=False,
                        name=None):
     """
-    :alias_main: paddle.nn.functional.generate_proposals
-    :alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
-    :old_api: paddle.fluid.layers.generate_proposals

     **Generate proposal Faster-RCNN**
@@ -2965,6 +2946,8 @@ def generate_proposals(scores,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
         bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
         im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
@@ -3036,9 +3019,6 @@ def generate_proposals(scores,
 def box_clip(input, im_info, name=None):
     """
-    :alias_main: paddle.nn.functional.box_clip
-    :alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
-    :old_api: paddle.fluid.layers.box_clip

     Clip the box into the size given by im_info
     For each input box, The formula is given as follows:
@@ -3079,6 +3059,8 @@ def box_clip(input, im_info, name=None):
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         boxes = fluid.data(
             name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
         im_info = fluid.data(name='im_info', shape=[-1 ,3])
@@ -3265,9 +3247,6 @@ def multiclass_nms(bboxes,
                    background_label=0,
                    name=None):
     """
-    :alias_main: paddle.nn.functional.multiclass_nms
-    :alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
-    :old_api: paddle.fluid.layers.multiclass_nms

     **Multiclass NMS**
@@ -3363,6 +3342,8 @@ def multiclass_nms(bboxes,
         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         boxes = fluid.data(name='bboxes', shape=[None,81, 4],
                            dtype='float32', lod_level=1)
         scores = fluid.data(name='scores', shape=[None,81],
@@ -3674,9 +3655,6 @@ def distribute_fpn_proposals(fpn_rois,
                              rois_num=None,
                              name=None):
     """
-    :alias_main: paddle.nn.functional.distribute_fpn_proposals
-    :alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
-    :old_api: paddle.fluid.layers.distribute_fpn_proposals

     **This op only takes LoDTensor as input.** In Feature Pyramid Networks
     (FPN) models, it is needed to distribute all proposals into different FPN
@@ -3732,6 +3710,8 @@ def distribute_fpn_proposals(fpn_rois,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         fpn_rois = fluid.data(
             name='data', shape=[None, 4], dtype='float32', lod_level=1)
         multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
@@ -3798,9 +3778,6 @@ def box_decoder_and_assign(prior_box,
                            box_clip,
                            name=None):
     """
-    :alias_main: paddle.nn.functional.box_decoder_and_assign
-    :alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
-    :old_api: paddle.fluid.layers.box_decoder_and_assign

     ${comment}
     Args:
@@ -3825,6 +3802,8 @@ def box_decoder_and_assign(prior_box,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         pb = fluid.data(
             name='prior_box', shape=[None, 4], dtype='float32')
         pbv = fluid.data(
@@ -3874,9 +3853,6 @@ def collect_fpn_proposals(multi_rois,
                           rois_num_per_level=None,
                           name=None):
     """
-    :alias_main: paddle.nn.functional.collect_fpn_proposals
-    :alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
-    :old_api: paddle.fluid.layers.collect_fpn_proposals

     **This OP only supports LoDTensor as input**. Concat multi-level RoIs
     (Region of Interest) and select N RoIs with respect to multi_scores.
@@ -3922,6 +3898,8 @@ def collect_fpn_proposals(multi_rois,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         multi_rois = []
         multi_scores = []
         for i in range(4):
...
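Several of the ops above (multiclass_nms, box_coder, bipartite_match) are built around box overlap; a toy IoU helper for orientation (plain Python, not a Paddle API):

    def iou(a, b):
        # boxes as [xmin, ymin, xmax, ymax]
        ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
        ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
        inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
        area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
        return inter / (area(a) + area(b) - inter)

    print(iou([0, 0, 2, 2], [1, 1, 3, 3]))  # 1 / 7, about 0.143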
@@ -52,9 +52,6 @@ def _decay_step_counter(begin=0):
 def noam_decay(d_model, warmup_steps, learning_rate=1.0):
     """
-    :alias_main: paddle.nn.functional.noam_decay
-    :alias: paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay
-    :old_api: paddle.fluid.layers.noam_decay

     Noam decay method. The numpy implementation of noam decay as follows.
@@ -115,9 +112,6 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0):
 def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
-    :alias_main: paddle.nn.functional.exponential_decay
-    :alias: paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay
-    :old_api: paddle.fluid.layers.exponential_decay

     Applies exponential decay to the learning rate.
@@ -149,6 +143,9 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()

         base_lr = 0.1
         sgd_optimizer = fluid.optimizer.SGD(
             learning_rate=fluid.layers.exponential_decay(
@@ -176,9 +173,6 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
 def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
-    :alias_main: paddle.nn.functional.natural_exp_decay
-    :alias: paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay
-    :old_api: paddle.fluid.layers.natural_exp_decay

 Applies natural exponential decay to the initial learning rate.
@@ -210,6 +204,9 @@ Applies natural exponential decay to the initial learning rate.
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()

         base_lr = 0.1
         sgd_optimizer = fluid.optimizer.SGD(
             learning_rate=fluid.layers.natural_exp_decay(
@@ -237,9 +234,6 @@ Applies natural exponential decay to the initial learning rate.
 def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
-    :alias_main: paddle.nn.functional.inverse_time_decay
-    :alias: paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay
-    :old_api: paddle.fluid.layers.inverse_time_decay

     Applies inverse time decay to the initial learning rate.
@@ -271,6 +265,8 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         base_lr = 0.1
         sgd_optimizer = fluid.optimizer.SGD(
             learning_rate=fluid.layers.inverse_time_decay(
@@ -302,10 +298,6 @@ def polynomial_decay(learning_rate,
                      power=1.0,
                      cycle=False):
     """
-    :alias_main: paddle.nn.functional.polynomial_decay
-    :alias: paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay
-    :old_api: paddle.fluid.layers.polynomial_decay
-2
     Applies polynomial decay to the initial learning rate.

     .. code-block:: text
@@ -371,9 +363,6 @@ def polynomial_decay(learning_rate,
 def piecewise_decay(boundaries, values):
     """
-    :alias_main: paddle.nn.functional.piecewise_decay
-    :alias: paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay
-    :old_api: paddle.fluid.layers.piecewise_decay

 Applies piecewise decay to the initial learning rate.
@@ -401,6 +390,8 @@ Applies piecewise decay to the initial learning rate.
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         boundaries = [10000, 20000]
         values = [1.0, 0.5, 0.1]
         optimizer = fluid.optimizer.Momentum(
@@ -450,9 +441,6 @@ Applies piecewise decay to the initial learning rate.
 def cosine_decay(learning_rate, step_each_epoch, epochs):
     """
-    :alias_main: paddle.nn.functional.cosine_decay
-    :alias: paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay
-    :old_api: paddle.fluid.layers.cosine_decay

     Applies cosine decay to the learning rate.
@@ -499,9 +487,6 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
 def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
     """
-    :alias_main: paddle.nn.functional.linear_lr_warmup
-    :alias: paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup
-    :old_api: paddle.fluid.layers.linear_lr_warmup

     This operator use the linear learning rate warm up strategy to adjust the learning rate preliminarily before the normal learning rate scheduling.
     For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/abs/1812.01187>`_
...
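The Noam decay mentioned above can be checked with a few lines of plain Python (a sketch of the formula from the docstring):

    # lr = learning_rate * d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
    def noam_lr(step, d_model=512, warmup_steps=4000, learning_rate=1.0):
        return learning_rate * d_model ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)

    print(noam_lr(1), noam_lr(4000))  # warms up linearly, then decays after warmup_steps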
@@ -59,9 +59,6 @@ def center_loss(input,
                 update_center=True):
     """
     :api_attr: Static Graph
-    :alias_main: paddle.nn.functional.center_loss
-    :alias: paddle.nn.functional.center_loss,paddle.nn.functional.loss.center_loss
-    :old_api: paddle.fluid.layers.center_loss

     **Center loss Cost layer**
@@ -92,6 +89,8 @@ def center_loss(input,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()

         input = fluid.data(name='x',shape=[20,30],dtype='float32')
         label = fluid.data(name='y',shape=[20,1],dtype='int64')
@@ -153,9 +152,6 @@ def center_loss(input,
 def bpr_loss(input, label, name=None):
     """
-    :alias_main: paddle.nn.functional.bpr_loss
-    :alias: paddle.nn.functional.bpr_loss,paddle.nn.functional.loss.bpr_loss
-    :old_api: paddle.fluid.layers.bpr_loss

     **Bayesian Personalized Ranking Loss Operator**
@@ -183,6 +179,9 @@ def bpr_loss(input, label, name=None):
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()

         neg_size = 10
         label = fluid.data(
@@ -1309,9 +1308,6 @@ def softmax_with_cross_entropy(logits,
 def rank_loss(label, left, right, name=None):
     """
-    :alias_main: paddle.nn.functional.rank_loss
-    :alias: paddle.nn.functional.rank_loss,paddle.nn.functional.loss.rank_loss
-    :old_api: paddle.fluid.layers.rank_loss

     This operator implements the sort loss layer in the RankNet model. RankNet is a pairwise ranking model
     with a training sample consisting of a pair of documents (A and B), The label (P)
@@ -1349,6 +1345,8 @@ def rank_loss(label, left, right, name=None):
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         label = fluid.data(name="label", shape=[-1, 1], dtype="float32")
         left = fluid.data(name="left", shape=[-1, 1], dtype="float32")
         right = fluid.data(name="right", shape=[-1, 1], dtype="float32")
@@ -1491,9 +1489,6 @@ def teacher_student_sigmoid_loss(input,
                                  soft_max_up_bound=15.0,
                                  soft_max_lower_bound=-15.0):
     """
-    :alias_main: paddle.nn.functional.teacher_student_sigmoid_loss
-    :alias: paddle.nn.functional.teacher_student_sigmoid_loss,paddle.nn.functional.loss.teacher_student_sigmoid_loss
-    :old_api: paddle.fluid.layers.teacher_student_sigmoid_loss

     **Teacher Student Log Loss Layer**
@@ -1521,7 +1516,8 @@ def teacher_student_sigmoid_loss(input,
     .. code-block:: python

         import paddle.fluid as fluid
+        import paddle
+        paddle.enable_static()
         batch_size = 64
         label = fluid.data(
             name="label", shape=[batch_size, 1], dtype="int64")
...
This diff is collapsed.
...@@ -488,7 +488,7 @@ def rnn(cell, ...@@ -488,7 +488,7 @@ def rnn(cell,
inputs = paddle.rand((4, 23, 16)) inputs = paddle.rand((4, 23, 16))
prev_h = paddle.randn((4, 32)) prev_h = paddle.randn((4, 32))
outputs, final_states = paddle.nn.functional.rnn(cell, inputs, prev_h) outputs, final_states = paddle.fluid.layers.rnn(cell, inputs, prev_h)
""" """
if in_dygraph_mode(): if in_dygraph_mode():
...@@ -711,7 +711,7 @@ def birnn(cell_fw, ...@@ -711,7 +711,7 @@ def birnn(cell_fw,
hf, cf = paddle.rand((4, 32)), paddle.rand((4, 32)) hf, cf = paddle.rand((4, 32)), paddle.rand((4, 32))
hb, cb = paddle.rand((4, 32)), paddle.rand((4, 32)) hb, cb = paddle.rand((4, 32)), paddle.rand((4, 32))
initial_states = ((hf, cf), (hb, cb)) initial_states = ((hf, cf), (hb, cb))
outputs, final_states = paddle.nn.functional.birnn( outputs, final_states = paddle.fluid.layers.birnn(
cell_fw, cell_bw, inputs, initial_states) cell_fw, cell_bw, inputs, initial_states)
""" """
...@@ -3046,9 +3046,6 @@ def beam_search(pre_ids, ...@@ -3046,9 +3046,6 @@ def beam_search(pre_ids,
name=None, name=None,
return_parent_idx=False): return_parent_idx=False):
""" """
:alias_main: paddle.nn.beam_search
:alias: paddle.nn.beam_search,paddle.nn.decode.beam_search
:old_api: paddle.fluid.layers.beam_search
Beam search is a classical algorithm for selecting candidate words in a Beam search is a classical algorithm for selecting candidate words in a
machine translation task. machine translation task.
...@@ -3126,6 +3123,8 @@ def beam_search(pre_ids, ...@@ -3126,6 +3123,8 @@ def beam_search(pre_ids,
.. code-block:: python .. code-block:: python
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle
paddle.enable_static()
# Suppose `probs` contains predicted results from the computation # Suppose `probs` contains predicted results from the computation
# cell and `pre_ids` and `pre_scores` is the output of beam_search # cell and `pre_ids` and `pre_scores` is the output of beam_search
@@ -3197,9 +3196,6 @@ def beam_search(pre_ids,
def beam_search_decode(ids, scores, beam_size, end_id, name=None):
    """
-    :alias_main: paddle.nn.beam_search_decode
-    :alias: paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode
-    :old_api: paddle.fluid.layers.beam_search_decode
    This operator is used after beam search has completed. It constructs the
    full predicted sequences for each sample by walking back along the search
@@ -3246,7 +3242,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None):
        .. code-block:: python
            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            # Suppose `ids` and `scores` are LodTensorArray variables reserving
            # the selected ids and scores of all steps
            ids = fluid.layers.create_array(dtype='int64')
...
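The walk-back that beam_search_decode performs can likewise be pictured with a small pure-Python sketch: starting from the last step, each selected id is followed to its parent beam in the previous step until the whole sequence is recovered. The (token_ids, parent_ids) layout below is hypothetical, chosen only to mirror the beam-search step sketched above:

.. code-block:: python

    def backtrack(steps, beam):
        # steps: list of (token_ids, parent_ids) pairs, one per time step
        seq = []
        for token_ids, parent_ids in reversed(steps):
            seq.append(token_ids[beam])
            beam = parent_ids[beam]  # follow the search path backwards
        return list(reversed(seq))

    steps = [([5, 7], [0, 0]), ([2, 9], [1, 0])]  # two steps, beam width 2
    print(backtrack(steps, beam=0))  # -> [7, 2]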
@@ -605,8 +605,6 @@ def assign(input, output=None):
def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """
-    :alias_main: paddle.fill_constant
-    :alias: paddle.tensor.fill_constant, paddle.tensor.creation.fill_constant
    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.
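A minimal static-graph use of the raw API that replaces the removed alias, assuming the paddle.enable_static() convention used throughout this PR:

.. code-block:: python

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()
    # a 2x3 float32 Tensor filled with 1.5
    data = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.5)
    exe = fluid.Executor(fluid.CPUPlace())
    out, = exe.run(fetch_list=[data])
    print(out)  # [[1.5 1.5 1.5] [1.5 1.5 1.5]]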
@@ -715,7 +713,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    return out
-@deprecated(since='1.8.0', update_to="paddle.fill_constant")
+@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
@@ -1228,7 +1226,7 @@ def has_inf(x):
            import paddle
            data = paddle.randn(shape=[4, 32, 32], dtype="float32")
-           res = paddle.has_inf(data)
+           res = paddle.fluid.layers.has_inf(data)
            # [False]
    """
@@ -1257,7 +1255,7 @@ def has_nan(x):
            import paddle
            data = paddle.randn(shape=[2,3], dtype="float32")
-           res = paddle.has_nan(data)
+           res = paddle.fluid.layers.has_nan(data)
            # [False]
    """
...
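Semantically, both checks reduce the whole tensor to a single boolean. A numpy equivalent, for intuition only:

.. code-block:: python

    import numpy as np

    x = np.array([1.0, np.inf, 3.0])
    y = np.array([1.0, np.nan, 3.0])
    print(np.isinf(x).any())  # True -- what has_inf computes
    print(np.isnan(y).any())  # True -- what has_nan computes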
@@ -851,6 +851,9 @@ class DetectionMAP(object):
            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            batch_size = None # can be any size
            image_boxs_num = 10
            bounding_bboxes_num = 21
...
@@ -105,7 +105,7 @@ class ReduceMeanLayer(object):
        """
        operation
        """
-        mean = paddle.reduce_mean(input)
+        mean = paddle.fluid.layers.reduce_mean(input)
        return mean
@@ -181,7 +181,7 @@ class ElementwiseSubLayer(object):
        """
        operation
        """
-        sub = paddle.elementwise_sub(x, y)
+        sub = paddle.fluid.layers.elementwise_sub(x, y)
        return sub
@@ -203,7 +203,7 @@ class ConstantLayer(object):
        shape = list(shape)
        input_shape = paddle.shape(input)
        shape[0] = input_shape[0]
-        constant = paddle.fill_constant(shape, dtype, value)
+        constant = paddle.fluid.layers.fill_constant(shape, dtype, value)
        return constant
@@ -473,8 +473,8 @@ class BOW(paddle.nn.Layer):
        right_emb = paddle.reshape(
            right_emb, shape=[-1, self.seq_len, self.bow_dim])
-        bow_left = paddle.reduce_sum(left_emb, dim=1)
-        bow_right = paddle.reduce_sum(right_emb, dim=1)
+        bow_left = paddle.fluid.layers.reduce_sum(left_emb, dim=1)
+        bow_right = paddle.fluid.layers.reduce_sum(right_emb, dim=1)
        softsign_layer = SoftsignLayer()
        left_soft = softsign_layer.ops(bow_left)
        right_soft = softsign_layer.ops(bow_right)
...
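The two reduce_sum(..., dim=1) calls above implement bag-of-words pooling: summing the [batch, seq_len, bow_dim] embeddings over the sequence axis leaves one [batch, bow_dim] vector per sample. In numpy terms:

.. code-block:: python

    import numpy as np

    emb = np.random.rand(8, 32, 128)  # [batch, seq_len, bow_dim]
    bow = emb.sum(axis=1)             # what reduce_sum(..., dim=1) computes
    print(bow.shape)                  # (8, 128)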
@@ -64,9 +64,9 @@ def get_source_code(func):
class StaticCode1():
    # TODO: Transform return statement
    def dyfunc_with_if_else(x_v, label=None):
-        __return_1 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
-        __return_0 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
-        __return_value_init_0 = paddle.fill_constant(
+        __return_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
+        __return_0 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
+        __return_value_init_0 = paddle.fluid.layers.fill_constant(
            shape=[1], dtype='float64', value=0.0)
        __return_value_0 = __return_value_init_0
@@ -84,7 +84,7 @@ class StaticCode1():
        def true_fn_1(__return_0, __return_value_0, label, x_v):
            loss = fluid.layers.cross_entropy(x_v, label)
-            __return_0 = paddle.fill_constant(
+            __return_0 = paddle.fluid.layers.fill_constant(
                shape=[1], dtype='bool', value=True)
            __return_value_0 = loss
            return __return_0, __return_value_0
@@ -98,7 +98,7 @@ class StaticCode1():
            (__return_0, __return_value_0), (__return_0, __return_value_0)))
        def true_fn_2(__return_1, __return_value_0, x_v):
-            __return_1 = paddle.fill_constant(
+            __return_1 = paddle.fluid.layers.fill_constant(
                shape=[1], dtype='bool', value=True)
            __return_value_0 = x_v
            return __return_1, __return_value_0
@@ -116,9 +116,9 @@ class StaticCode1():
class StaticCode2():
    # TODO: Transform return statement
    def dyfunc_with_if_else(x_v, label=None):
-        __return_3 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
-        __return_2 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
-        __return_value_init_1 = paddle.fill_constant(
+        __return_3 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
+        __return_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
+        __return_value_init_1 = paddle.fluid.layers.fill_constant(
            shape=[1], dtype='float64', value=0.0)
        __return_value_1 = __return_value_init_1
@@ -136,7 +136,7 @@ class StaticCode2():
        def true_fn_4(__return_2, __return_value_1, label, x_v):
            loss = fluid.layers.cross_entropy(x_v, label)
-            __return_2 = paddle.fill_constant(
+            __return_2 = paddle.fluid.layers.fill_constant(
                shape=[1], dtype='bool', value=True)
            __return_value_1 = loss
            return __return_2, __return_value_1
@@ -150,7 +150,7 @@ class StaticCode2():
            (__return_2, __return_value_1), (__return_2, __return_value_1)))
        def true_fn_5(__return_3, __return_value_1, x_v):
-            __return_3 = paddle.fill_constant(
+            __return_3 = paddle.fluid.layers.fill_constant(
                shape=[1], dtype='bool', value=True)
            __return_value_1 = x_v
            return __return_3, __return_value_1
...
@@ -187,8 +187,8 @@ class PtbModel(paddle.nn.Layer):
        loss = paddle.nn.functional.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False)
        loss = paddle.reshape(loss, shape=[-1, self.num_steps])
-        loss = paddle.reduce_mean(loss, dim=[0])
-        loss = paddle.reduce_sum(loss)
+        loss = paddle.fluid.layers.reduce_mean(loss, dim=[0])
+        loss = paddle.fluid.layers.reduce_sum(loss)
        return loss, last_hidden, last_cell
...
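The reduction pair above first averages the [batch, num_steps] per-token losses over the batch axis (dim=[0]) and then sums over the time steps, producing a scalar loss; a numpy rendering with made-up sizes:

.. code-block:: python

    import numpy as np

    per_token = np.random.rand(20, 35)   # [batch, num_steps] token losses
    loss = per_token.mean(axis=0).sum()  # mean over batch, sum over steps
    print(loss.shape)                    # () -- a scalar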
@@ -153,7 +153,7 @@ class ResNet(paddle.nn.Layer):
        self.conv = ConvBNLayer(
            num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
-        self.pool2d_max = paddle.nn.Pool2D(
+        self.pool2d_max = paddle.fluid.dygraph.Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        self.bottleneck_block_list = []
@@ -171,7 +171,7 @@ class ResNet(paddle.nn.Layer):
            self.bottleneck_block_list.append(bottleneck_block)
            shortcut = True
-        self.pool2d_avg = paddle.nn.Pool2D(
+        self.pool2d_avg = paddle.fluid.dygraph.Pool2D(
            pool_size=7, pool_type='avg', global_pooling=True)
        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1
...
@@ -51,24 +51,24 @@ class TestDataLayerNotCheck(unittest.TestCase):
class TestVariableTransFunc(unittest.TestCase):
    def test_create_fill_constant_node(self):
        node = create_fill_constant_node("a", 1.0)
-        source = "a = paddle.fill_constant(shape=[1], dtype='float64', value=1.0)"
+        source = "a = paddle.fluid.layers.fill_constant(shape=[1], dtype='float64', value=1.0)"
        self.assertEqual(ast_to_source_code(node).strip(), source)

        node = create_fill_constant_node("b", True)
-        source = "b = paddle.fill_constant(shape=[1], dtype='bool', value=True)"
+        source = "b = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)"
        self.assertEqual(ast_to_source_code(node).strip(), source)

        if six.PY2:
            node = create_fill_constant_node("c", 214)
-            source = "c = paddle.fill_constant(shape=[1], dtype='int32', value=214)"
+            source = "c = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=214)"
            self.assertEqual(ast_to_source_code(node).strip(), source)

            node = create_fill_constant_node("d", long(10086))
-            source = "d = paddle.fill_constant(shape=[1], dtype='int64', value=10086)"
+            source = "d = paddle.fluid.layers.fill_constant(shape=[1], dtype='int64', value=10086)"
            self.assertEqual(ast_to_source_code(node).strip(), source)
        else:
            node = create_fill_constant_node("c", 4293)
-            source = "c = paddle.fill_constant(shape=[1], dtype='int64', value=4293)"
+            source = "c = paddle.fluid.layers.fill_constant(shape=[1], dtype='int64', value=4293)"
            self.assertEqual(ast_to_source_code(node).strip(), source)

        self.assertIsNone(create_fill_constant_node("e", None))
...
@@ -40,9 +40,9 @@ class SquaredMatSubFusePassTest(InferencePassTest):
            matmul_ab_square = paddle.square(matmul_ab)
            matmul_square_ab = paddle.matmul(data_a_square, data_b_square)
-            scale = paddle.fill_constant(shape=[1], value=0.5, dtype='float32')
-            sub_val = paddle.elementwise_sub(matmul_ab_square, matmul_square_ab)
+            scale = paddle.fluid.layers.fill_constant(shape=[1], value=0.5, dtype='float32')
+            sub_val = paddle.fluid.layers.elementwise_sub(matmul_ab_square, matmul_square_ab)
            squared_mat_sub_out = fluid.layers.elementwise_mul(sub_val, scale)
            self.feeds = {
...
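The graph built above (with data_a_square and data_b_square presumably the elementwise squares of the two inputs, defined outside this hunk) computes 0.5 * ((A·B)^2 - A^2·B^2), the pattern the fuse pass recognizes; a quick numpy check with hypothetical shapes:

.. code-block:: python

    import numpy as np

    a, b = np.random.rand(4, 5), np.random.rand(5, 6)
    fused = 0.5 * ((a @ b) ** 2 - (a ** 2) @ (b ** 2))
    print(fused.shape)  # (4, 6)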
@@ -26,7 +26,7 @@ import paddle.fluid as fluid
import paddle.fluid.dygraph as dygraph
from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer
-from paddle.nn import Conv2d, Pool2D, Linear, SyncBatchNorm
+from paddle.nn import Conv2d, Linear, SyncBatchNorm
from paddle.fluid.dygraph.base import to_variable
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
...
@@ -70,10 +70,10 @@ class TestSimpleRNNCell(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                init_h = paddle.data(
+                init_h = paddle.fluid.data(
                    "init_h", [-1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data, init_h)
@@ -98,7 +98,7 @@ class TestSimpleRNNCell(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data)
@@ -166,10 +166,10 @@ class TestGRUCell(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                init_h = paddle.data(
+                init_h = paddle.fluid.data(
                    "init_h", [-1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data, init_h)
@@ -194,7 +194,7 @@ class TestGRUCell(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data)
@@ -263,13 +263,13 @@ class TestLSTMCell(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                init_h = paddle.data(
+                init_h = paddle.fluid.data(
                    "init_h", [-1, 32],
                    dtype=paddle.framework.get_default_dtype())
-                init_c = paddle.data(
+                init_c = paddle.fluid.data(
                    "init_c", [-1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, (h, c) = rnn2(x_data, (init_h, init_c))
@@ -295,7 +295,7 @@ class TestLSTMCell(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, (h, c) = rnn2(x_data)
...
@@ -81,10 +81,10 @@ class TestSimpleRNN(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                init_h = paddle.data(
+                init_h = paddle.fluid.data(
                    "init_h", [2 * self.num_directions, -1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data, init_h)
@@ -112,7 +112,7 @@ class TestSimpleRNN(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data)
@@ -142,10 +142,10 @@ class TestSimpleRNN(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                seq_len = paddle.data("seq_len", [-1], dtype="int64")
+                seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
                mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
                if self.time_major:
                    mask = paddle.transpose(mask, [1, 0])
@@ -226,10 +226,10 @@ class TestGRU(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                init_h = paddle.data(
+                init_h = paddle.fluid.data(
                    "init_h", [2 * self.num_directions, -1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data, init_h)
@@ -257,7 +257,7 @@ class TestGRU(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, h = rnn2(x_data)
@@ -287,10 +287,10 @@ class TestGRU(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                seq_len = paddle.data("seq_len", [-1], dtype="int64")
+                seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
                mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
                if self.time_major:
                    mask = paddle.transpose(mask, [1, 0])
@@ -368,13 +368,13 @@ class TestLSTM(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                init_h = paddle.data(
+                init_h = paddle.fluid.data(
                    "init_h", [2 * self.num_directions, -1, 32],
                    dtype=paddle.framework.get_default_dtype())
-                init_c = paddle.data(
+                init_c = paddle.fluid.data(
                    "init_c", [2 * self.num_directions, -1, 32],
                    dtype=paddle.framework.get_default_dtype())
                y, (h, c) = rnn2(x_data, (init_h, init_c))
@@ -403,7 +403,7 @@ class TestLSTM(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, (h, c) = rnn2(x_data)
@@ -434,10 +434,10 @@ class TestLSTM(unittest.TestCase):
        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
-                x_data = paddle.data(
+                x_data = paddle.fluid.data(
                    "input", [-1, -1, 16],
                    dtype=paddle.framework.get_default_dtype())
-                seq_len = paddle.data("seq_len", [-1], dtype="int64")
+                seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
                mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
                if self.time_major:
                    mask = paddle.transpose(mask, [1, 0])
...
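sequence_mask, used by the padded-input tests above, turns the seq_len vector into a [batch, max_len] 0/1 matrix marking the valid positions (then transposed for the time-major case). A numpy equivalent:

.. code-block:: python

    import numpy as np

    seq_len = np.array([3, 1, 2])
    max_len = seq_len.max()
    mask = (np.arange(max_len)[None, :] < seq_len[:, None]).astype('float32')
    print(mask)
    # [[1. 1. 1.]
    #  [1. 0. 0.]
    #  [1. 1. 0.]]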
@@ -143,7 +143,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [11, 17])
+            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
@@ -167,7 +167,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
    def test_fluid_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [11, 17])
+            x = paddle.fluid.data('X', [11, 17])
            out = paddle.fluid.layers.logsigmoid(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
@@ -180,10 +180,10 @@ class TestLogSigmoidAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.log_sigmoid, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
        self.assertRaises(TypeError, F.log_sigmoid, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
        F.log_sigmoid(x_fp16)
@@ -222,7 +222,7 @@ class TestTanhAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [10, 12], self.dtype)
+            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = F.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
@@ -260,10 +260,10 @@ class TestTanhAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.tanh, 1)
        # The input dtype must be float16, float32.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.tanh, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.tanh(x_fp16)
@@ -482,7 +482,7 @@ class TestTanhshrinkAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
@@ -519,10 +519,10 @@ class TestTanhshrinkAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.tanhshrink, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.tanhshrink, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.tanhshrink(x_fp16)
@@ -572,7 +572,7 @@ class TestHardShrinkAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [10, 12])
+            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
@@ -616,10 +616,10 @@ class TestHardShrinkAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.hardshrink, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.hardshrink, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.hardshrink(x_fp16)
@@ -642,7 +642,7 @@ class TestHardtanhAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [10, 12])
+            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
@@ -676,10 +676,10 @@ class TestHardtanhAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.hardtanh, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.hardtanh, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.hardtanh(x_fp16)
@@ -722,7 +722,7 @@ class TestSoftshrinkAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
@@ -759,13 +759,13 @@ class TestSoftshrinkAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.softshrink, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.softshrink, x_int32)
        # The threshold must be no less than zero
-        x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32')
+        x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
        self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.softshrink(x_fp16)
@@ -983,7 +983,7 @@ class TestReluAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [10, 12])
+            x = paddle.fluid.data('X', [10, 12])
            out1 = F.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
@@ -1010,10 +1010,10 @@ class TestReluAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.relu, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[10, 12], dtype='int32')
        self.assertRaises(TypeError, F.relu, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[10, 12], dtype='float16')
        F.relu(x_fp16)
@@ -1075,7 +1075,7 @@ class TestLeakyReluAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [10, 12])
+            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
@@ -1119,10 +1119,10 @@ class TestLeakyReluAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.leaky_relu, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.leaky_relu, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.leaky_relu(x_fp16)
@@ -1184,7 +1184,7 @@ class TestGELUAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [11, 17])
+            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
@@ -1218,10 +1218,10 @@ class TestGELUAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.gelu, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
        self.assertRaises(TypeError, F.gelu, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
        F.gelu(x_fp16)
@@ -1331,7 +1331,7 @@ class TestRelu6API(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
@@ -1368,10 +1368,10 @@ class TestRelu6API(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.relu6, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.relu6, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.relu6(x_fp16)
@@ -1414,7 +1414,7 @@ class TestHardswishAPI(unittest.TestCase):
    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
@@ -1455,10 +1455,10 @@ class TestHardswishAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.hardswish, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.hardswish, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.hardswish(x_fp16)
@@ -1538,7 +1538,7 @@ class TestELUAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', [10, 12])
+            x = paddle.fluid.data('X', [10, 12])
            out1 = F.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
@@ -1572,10 +1572,10 @@ class TestELUAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.elu, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[10, 12], dtype='int32')
        self.assertRaises(TypeError, F.elu, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[10, 12], dtype='float16')
        F.elu(x_fp16)
@@ -1858,7 +1858,7 @@ class TestSoftplusAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
@@ -1895,10 +1895,10 @@ class TestSoftplusAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.softplus, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.softplus, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.softplus(x_fp16)
@@ -1935,7 +1935,7 @@ class TestSoftsignAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
@@ -1972,10 +1972,10 @@ class TestSoftsignAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.softsign, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.softsign, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.softsign(x_fp16)
@@ -2018,7 +2018,7 @@ class TestThresholdedReluAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
@@ -2055,10 +2055,10 @@ class TestThresholdedReluAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.thresholded_relu, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.thresholded_relu, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.thresholded_relu(x_fp16)
@@ -2113,7 +2113,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
@@ -2154,10 +2154,10 @@ class TestHardsigmoidAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.hardsigmoid, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.hardsigmoid, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.hardsigmoid(x_fp16)
@@ -2195,7 +2195,7 @@ class TestSwishAPI(unittest.TestCase):
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
@@ -2232,10 +2232,10 @@ class TestSwishAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.swish, 1)
        # The input dtype must be float16, float32, float64.
-        x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+        x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.swish, x_int32)
        # support the input dtype is float16
-        x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+        x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.swish(x_fp16)
...
@@ -499,7 +499,7 @@ class TestAdamOpV2(unittest.TestCase):
        cur_lr = adam.get_lr()
        assert (lr == cur_lr)
        with self.assertRaises(TypeError):
-            lr_var = paddle.create_global_var(
+            lr_var = paddle.fluid.layers.create_global_var(
                shape=[1], value=lr, dtype='float32')
            adam.set_lr(lr_var)
...
...@@ -110,7 +110,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase): ...@@ -110,7 +110,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_avg_pool2d( out_1 = paddle.nn.functional.adaptive_avg_pool2d(
x=x, output_size=[3, 3]) x=x, output_size=[3, 3])
...@@ -205,7 +205,7 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase): ...@@ -205,7 +205,7 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3]) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3])
out_1 = adaptive_avg_pool(x=x) out_1 = adaptive_avg_pool(x=x)
......
...@@ -125,7 +125,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase): ...@@ -125,7 +125,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_avg_pool3d( out_1 = paddle.nn.functional.adaptive_avg_pool3d(
x=x, output_size=[3, 3, 3]) x=x, output_size=[3, 3, 3])
...@@ -220,7 +220,7 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase): ...@@ -220,7 +220,7 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
......
...@@ -110,7 +110,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase): ...@@ -110,7 +110,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_max_pool2d( out_1 = paddle.nn.functional.adaptive_max_pool2d(
x=x, output_size=[3, 3]) x=x, output_size=[3, 3])
...@@ -200,7 +200,7 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase): ...@@ -200,7 +200,7 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3]) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3])
out_1 = adaptive_max_pool(x=x) out_1 = adaptive_max_pool(x=x)
......
...@@ -125,7 +125,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase): ...@@ -125,7 +125,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_max_pool3d( out_1 = paddle.nn.functional.adaptive_max_pool3d(
x=x, output_size=[3, 3, 3]) x=x, output_size=[3, 3, 3])
...@@ -215,7 +215,7 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase): ...@@ -215,7 +215,7 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
output_size=[3, 3, 3]) output_size=[3, 3, 3])
......
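All of the pooling-test hunks above make the same mechanical substitution: the short-lived alias paddle.data is removed, so static-graph placeholders are declared through paddle.fluid.data. A minimal self-contained sketch of the pattern, mirroring the adaptive_avg_pool3d case (shapes and names are illustrative):

    import numpy as np
    import paddle

    paddle.enable_static()
    # paddle.data is gone; the underlying placeholder API lives in fluid
    x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
    out = paddle.nn.functional.adaptive_avg_pool3d(x=x, output_size=[3, 3, 3])

    exe = paddle.static.Executor(paddle.CPUPlace())
    res = exe.run(feed={"x": np.random.rand(2, 3, 5, 7, 7).astype("float32")},
                  fetch_list=[out])
    print(res[0].shape)  # (2, 3, 3, 3, 3)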
...@@ -18,7 +18,6 @@ import paddle.fluid.core as core ...@@ -18,7 +18,6 @@ import paddle.fluid.core as core
from op_test import OpTest from op_test import OpTest
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle import paddle
import paddle.nn.functional as F
from paddle.fluid import Program, program_guard from paddle.fluid import Program, program_guard
...@@ -157,7 +156,7 @@ class TestAddPositionEncodingOpDygraph(unittest.TestCase): ...@@ -157,7 +156,7 @@ class TestAddPositionEncodingOpDygraph(unittest.TestCase):
def test_dygraph(self): def test_dygraph(self):
paddle.disable_static() paddle.disable_static()
tensor = np.random.randn(16, 32, 64) tensor = np.random.randn(16, 32, 64)
position_tensor = F.add_position_encoding( position_tensor = paddle.fluid.layers.add_position_encoding(
input=paddle.to_tensor(tensor), alpha=1.0, beta=1.0).numpy() input=paddle.to_tensor(tensor), alpha=1.0, beta=1.0).numpy()
paddle.enable_static() paddle.enable_static()
......
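add_position_encoding never received a paddle.nn.functional home in 2.0, so the alias import is dropped and the test calls the fluid op directly. A minimal dygraph sketch (the op expects a 3-D (batch, seq_len, dim) input; the shape here is illustrative):

    import numpy as np
    import paddle

    paddle.disable_static()
    tensor = np.random.randn(4, 8, 16).astype("float32")
    # F.add_position_encoding was removed; call the fluid layer directly
    out = paddle.fluid.layers.add_position_encoding(
        input=paddle.to_tensor(tensor), alpha=1.0, beta=1.0)
    print(out.numpy().shape)  # (4, 8, 16)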
...@@ -37,7 +37,7 @@ class TestAddcmulLayer(unittest.TestCase): ...@@ -37,7 +37,7 @@ class TestAddcmulLayer(unittest.TestCase):
tensor1 = fluid.data(name="tensor1", dtype=self._dtype, shape=[100]) tensor1 = fluid.data(name="tensor1", dtype=self._dtype, shape=[100])
tensor2 = fluid.data( tensor2 = fluid.data(
name="tensor2", dtype=self._dtype, shape=[3, 100]) name="tensor2", dtype=self._dtype, shape=[3, 100])
out = paddle.addcmul(input, tensor1, tensor2, value) out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value)
exe = fluid.Executor(self._place) exe = fluid.Executor(self._place)
return exe.run(feed={ return exe.run(feed={
...@@ -53,7 +53,7 @@ class TestAddcmulLayer(unittest.TestCase): ...@@ -53,7 +53,7 @@ class TestAddcmulLayer(unittest.TestCase):
input = fluid.dygraph.to_variable(self.input) input = fluid.dygraph.to_variable(self.input)
tensor1 = fluid.dygraph.to_variable(self.tensor1) tensor1 = fluid.dygraph.to_variable(self.tensor1)
tensor2 = fluid.dygraph.to_variable(self.tensor2) tensor2 = fluid.dygraph.to_variable(self.tensor2)
out = paddle.addcmul(input, tensor1, tensor2, value) out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value)
return out.numpy() return out.numpy()
def numpy(self, value=1.0): def numpy(self, value=1.0):
...@@ -85,7 +85,7 @@ class TestAddcmul(unittest.TestCase): ...@@ -85,7 +85,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32') tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32')
tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32') tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2) out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape) self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast0(self): def test_addcmul_with_broadcast0(self):
...@@ -95,7 +95,7 @@ class TestAddcmul(unittest.TestCase): ...@@ -95,7 +95,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32') tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[100], dtype='float32') tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2) out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape) self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast1(self): def test_addcmul_with_broadcast1(self):
...@@ -105,7 +105,7 @@ class TestAddcmul(unittest.TestCase): ...@@ -105,7 +105,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[100], dtype='float32') tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[4, 100], dtype='float32') tensor2 = fluid.data(name='t2', shape=[4, 100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2) out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape) self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast2(self): def test_addcmul_with_broadcast2(self):
...@@ -115,7 +115,7 @@ class TestAddcmul(unittest.TestCase): ...@@ -115,7 +115,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[100], dtype='float32') tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[100], dtype='float32') tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2) out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape) self.assertEqual(out.shape, input.shape)
...@@ -129,7 +129,7 @@ class InvalidInputTest(unittest.TestCase): ...@@ -129,7 +129,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='float32') name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = fluid.data( tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32') name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2) out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_input) self.assertRaises(TypeError, test_invalid_input)
...@@ -141,7 +141,7 @@ class InvalidInputTest(unittest.TestCase): ...@@ -141,7 +141,7 @@ class InvalidInputTest(unittest.TestCase):
tensor1 = [20, 20] tensor1 = [20, 20]
tensor2 = fluid.data( tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32') name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2) out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_tensor1) self.assertRaises(TypeError, test_invalid_tensor1)
...@@ -153,7 +153,7 @@ class InvalidInputTest(unittest.TestCase): ...@@ -153,7 +153,7 @@ class InvalidInputTest(unittest.TestCase):
tensor1 = fluid.data( tensor1 = fluid.data(
name='tensor1', shape=[20, 20], dtype='float32') name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = [20, 20] tensor2 = [20, 20]
out = paddle.addcmul(input, tensor1, tensor2) out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_tensor2) self.assertRaises(TypeError, test_invalid_tensor2)
...@@ -166,7 +166,7 @@ class InvalidInputTest(unittest.TestCase): ...@@ -166,7 +166,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='float32') name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = fluid.data( tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32') name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2, value=1) out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=1)
self.assertRaises(TypeError, test_invalid_value_int) self.assertRaises(TypeError, test_invalid_value_int)
...@@ -178,7 +178,7 @@ class InvalidInputTest(unittest.TestCase): ...@@ -178,7 +178,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='int32') name='tensor1', shape=[20, 20], dtype='int32')
tensor2 = fluid.data( tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='int32') name='tensor2', shape=[20, 20], dtype='int32')
out = paddle.addcmul(input, tensor1, tensor2, value=1.0) out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=1.0)
self.assertRaises(TypeError, test_invalid_value_float) self.assertRaises(TypeError, test_invalid_value_float)
......
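addcmul keeps working, but only under its full module path now that the root-level alias is gone. For reference, a minimal dygraph sketch of the call and the arithmetic it implements (values are illustrative):

    import paddle

    paddle.disable_static()
    inp = paddle.ones([3, 4])
    t1 = paddle.ones([3, 4])
    t2 = paddle.ones([3, 4])
    # paddle.addcmul was removed; the function remains in its home module
    out = paddle.tensor.math.addcmul(inp, t1, t2, value=0.5)
    # computes inp + value * t1 * t2, so every element is 1.5 here
    print(float(out.numpy()[0, 0]))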
...@@ -95,8 +95,8 @@ class TestAllcloseError(unittest.TestCase): ...@@ -95,8 +95,8 @@ class TestAllcloseError(unittest.TestCase):
def test_x_dtype(): def test_x_dtype():
with paddle.static.program_guard(paddle.static.Program(), with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()): paddle.static.Program()):
x = paddle.data(name='x', shape=[10, 10], dtype='float16') x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16')
y = paddle.data(name='y', shape=[10, 10], dtype='float64') y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
result = paddle.allclose(x, y) result = paddle.allclose(x, y)
self.assertRaises(TypeError, test_x_dtype) self.assertRaises(TypeError, test_x_dtype)
...@@ -104,15 +104,15 @@ class TestAllcloseError(unittest.TestCase): ...@@ -104,15 +104,15 @@ class TestAllcloseError(unittest.TestCase):
def test_y_dtype(): def test_y_dtype():
with paddle.static.program_guard(paddle.static.Program(), with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()): paddle.static.Program()):
x = paddle.data(name='x', shape=[10, 10], dtype='float64') x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.data(name='y', shape=[10, 10], dtype='int32') y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32')
result = paddle.allclose(x, y) result = paddle.allclose(x, y)
self.assertRaises(TypeError, test_y_dtype) self.assertRaises(TypeError, test_y_dtype)
def test_attr(self): def test_attr(self):
x = paddle.data(name='x', shape=[10, 10], dtype='float64') x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.data(name='y', shape=[10, 10], dtype='float64') y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
def test_rtol(): def test_rtol():
result = paddle.allclose(x, y, rtol=True) result = paddle.allclose(x, y, rtol=True)
......
...@@ -27,10 +27,10 @@ def test_static_layer(place, ...@@ -27,10 +27,10 @@ def test_static_layer(place,
prog = paddle.static.Program() prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=input_np.shape, dtype='float64') input = paddle.fluid.data(name='input', shape=input_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64') label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
if weight_np is not None: if weight_np is not None:
weight = paddle.data( weight = paddle.fluid.data(
name='weight', shape=weight_np.shape, dtype='float64') name='weight', shape=weight_np.shape, dtype='float64')
bce_loss = paddle.nn.loss.BCELoss( bce_loss = paddle.nn.loss.BCELoss(
weight=weight, reduction=reduction) weight=weight, reduction=reduction)
...@@ -58,10 +58,10 @@ def test_static_functional(place, ...@@ -58,10 +58,10 @@ def test_static_functional(place,
prog = paddle.static.Program() prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=input_np.shape, dtype='float64') input = paddle.fluid.data(name='input', shape=input_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64') label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
if weight_np is not None: if weight_np is not None:
weight = paddle.data( weight = paddle.fluid.data(
name='weight', shape=weight_np.shape, dtype='float64') name='weight', shape=weight_np.shape, dtype='float64')
res = paddle.nn.functional.binary_cross_entropy( res = paddle.nn.functional.binary_cross_entropy(
input, label, weight=weight, reduction=reduction) input, label, weight=weight, reduction=reduction)
......
...@@ -48,18 +48,18 @@ def test_static(place, ...@@ -48,18 +48,18 @@ def test_static(place,
prog = paddle.static.Program() prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
logit = paddle.data(name='logit', shape=logit_np.shape, dtype='float64') logit = paddle.fluid.data(name='logit', shape=logit_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64') label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
feed_dict = {"logit": logit_np, "label": label_np} feed_dict = {"logit": logit_np, "label": label_np}
pos_weight = None pos_weight = None
weight = None weight = None
if pos_weight_np is not None: if pos_weight_np is not None:
pos_weight = paddle.data( pos_weight = paddle.fluid.data(
name='pos_weight', shape=pos_weight_np.shape, dtype='float64') name='pos_weight', shape=pos_weight_np.shape, dtype='float64')
feed_dict["pos_weight"] = pos_weight_np feed_dict["pos_weight"] = pos_weight_np
if weight_np is not None: if weight_np is not None:
weight = paddle.data( weight = paddle.fluid.data(
name='weight', shape=weight_np.shape, dtype='float64') name='weight', shape=weight_np.shape, dtype='float64')
feed_dict["weight"] = weight_np feed_dict["weight"] = weight_np
if functional: if functional:
......
...@@ -27,28 +27,28 @@ class TestChunkOpError(unittest.TestCase): ...@@ -27,28 +27,28 @@ class TestChunkOpError(unittest.TestCase):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
# The type of axis in chunk_op should be int or Variable. # The type of axis in chunk_op should be int or Variable.
def test_axis_type(): def test_axis_type():
x1 = paddle.data(shape=[4], dtype='float16', name='x3') x1 = paddle.fluid.data(shape=[4], dtype='float16', name='x3')
paddle.chunk(x=x1, chunks=2, axis=3.2) paddle.chunk(x=x1, chunks=2, axis=3.2)
self.assertRaises(TypeError, test_axis_type) self.assertRaises(TypeError, test_axis_type)
# The type of axis in chunk op should be int or Variable. # The type of axis in chunk op should be int or Variable.
def test_axis_variable_type(): def test_axis_variable_type():
x2 = paddle.data(shape=[4], dtype='float16', name='x9') x2 = paddle.fluid.data(shape=[4], dtype='float16', name='x9')
x3 = paddle.data(shape=[1], dtype='float16', name='x10') x3 = paddle.fluid.data(shape=[1], dtype='float16', name='x10')
paddle.chunk(input=x2, chunks=2, axis=x3) paddle.chunk(input=x2, chunks=2, axis=x3)
self.assertRaises(TypeError, test_axis_variable_type) self.assertRaises(TypeError, test_axis_variable_type)
# The type of num_or_sections in chunk_op should be int, tuple or list. # The type of num_or_sections in chunk_op should be int, tuple or list.
def test_chunks_type(): def test_chunks_type():
x4 = paddle.data(shape=[4], dtype='float16', name='x4') x4 = paddle.fluid.data(shape=[4], dtype='float16', name='x4')
paddle.chunk(input=x4, chunks=2.1, axis=3) paddle.chunk(input=x4, chunks=2.1, axis=3)
self.assertRaises(TypeError, test_chunks_type) self.assertRaises(TypeError, test_chunks_type)
def test_axis_type_tensor(): def test_axis_type_tensor():
x5 = paddle.data(shape=[4], dtype='float16', name='x6') x5 = paddle.fluid.data(shape=[4], dtype='float16', name='x6')
paddle.chunk(input=x5, chunks=2, axis=3.2) paddle.chunk(input=x5, chunks=2, axis=3.2)
self.assertRaises(TypeError, test_axis_type_tensor) self.assertRaises(TypeError, test_axis_type_tensor)
...@@ -57,8 +57,8 @@ class TestChunkOpError(unittest.TestCase): ...@@ -57,8 +57,8 @@ class TestChunkOpError(unittest.TestCase):
class API_TestChunk(unittest.TestCase): class API_TestChunk(unittest.TestCase):
def test_out(self): def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64') data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64')
data2 = paddle.data('data2', shape=[1], dtype='int32') data2 = paddle.fluid.data('data2', shape=[1], dtype='int32')
x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=data2) x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=data2)
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
...@@ -76,7 +76,7 @@ class API_TestChunk(unittest.TestCase): ...@@ -76,7 +76,7 @@ class API_TestChunk(unittest.TestCase):
class API_TestChunk1(unittest.TestCase): class API_TestChunk1(unittest.TestCase):
def test_out(self): def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64') data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64')
x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=2) x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=2)
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
......
...@@ -253,16 +253,16 @@ class TestConcatAPI(unittest.TestCase): ...@@ -253,16 +253,16 @@ class TestConcatAPI(unittest.TestCase):
assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1)) assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
def test_api(self): def test_api(self):
x_1 = paddle.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1') x_1 = paddle.fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
paddle.concat([x_1, x_1], 0) paddle.concat([x_1, x_1], 0)
input_2 = np.random.random([2, 1, 4, 5]).astype("int32") input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
input_3 = np.random.random([2, 2, 4, 5]).astype("int32") input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
positive_1_int32 = paddle.fill_constant([1], "int32", 1) positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1)
positive_1_int64 = paddle.fill_constant([1], "int64", 1) positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1)
negative_int64 = paddle.fill_constant([1], "int64", -3) negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3)
out_1 = paddle.concat(x=[x_2, x_3], axis=1) out_1 = paddle.concat(x=[x_2, x_3], axis=1)
out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32) out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64) out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
...@@ -305,8 +305,8 @@ class TestConcatAPI(unittest.TestCase): ...@@ -305,8 +305,8 @@ class TestConcatAPI(unittest.TestCase):
np.array([[-1]]), [[1]], fluid.CPUPlace()) np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, paddle.concat, [x2]) self.assertRaises(TypeError, paddle.concat, [x2])
# The input dtype of concat_op must be float16, float32, float64, int32, int64. # The input dtype of concat_op must be float16, float32, float64, int32, int64.
x4 = paddle.data(shape=[4], dtype='uint8', name='x4') x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4')
x5 = paddle.data(shape=[4], dtype='uint8', name='x5') x5 = paddle.fluid.data(shape=[4], dtype='uint8', name='x5')
self.assertRaises(TypeError, fluid.layers.concat, [x4, x5]) self.assertRaises(TypeError, fluid.layers.concat, [x4, x5])
# The type of axis in concat_op should be int or Variable. # The type of axis in concat_op should be int or Variable.
......
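Two aliases disappear at once in the concat test: paddle.data and paddle.fill_constant. The fill_constant calls build one-element integer tensors that are then passed as the axis argument of paddle.concat; a condensed sketch of that idiom (names and shapes are illustrative):

    import numpy as np
    import paddle

    paddle.enable_static()
    x = paddle.fluid.data(shape=[2, 1, 4, 5], dtype="int32", name="x")
    y = paddle.fluid.data(shape=[2, 2, 4, 5], dtype="int32", name="y")
    # paddle.fill_constant is gone from the root namespace; use the fluid layer
    axis = paddle.fluid.layers.fill_constant([1], "int32", 1)
    out = paddle.concat(x=[x, y], axis=axis)  # concat along dim 1 via a tensor axis

    exe = paddle.static.Executor(paddle.CPUPlace())
    res = exe.run(feed={"x": np.zeros([2, 1, 4, 5], "int32"),
                        "y": np.zeros([2, 2, 4, 5], "int32")},
                  fetch_list=[out])
    print(res[0].shape)  # (2, 3, 4, 5)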
...@@ -48,8 +48,8 @@ class TestCosineSimilarityAPI(unittest.TestCase): ...@@ -48,8 +48,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
np_x1 = np.random.rand(*shape).astype(np.float32) np_x1 = np.random.rand(*shape).astype(np.float32)
np_x2 = np.random.rand(*shape).astype(np.float32) np_x2 = np.random.rand(*shape).astype(np.float32)
x1 = paddle.data(name="x1", shape=shape) x1 = paddle.fluid.data(name="x1", shape=shape)
x2 = paddle.data(name="x2", shape=shape) x2 = paddle.fluid.data(name="x2", shape=shape)
result = F.cosine_similarity(x1, x2, axis=axis, eps=eps) result = F.cosine_similarity(x1, x2, axis=axis, eps=eps)
exe = Executor(place) exe = Executor(place)
fetches = exe.run(default_main_program(), fetches = exe.run(default_main_program(),
......
...@@ -172,11 +172,11 @@ class TestDiagV2API(unittest.TestCase): ...@@ -172,11 +172,11 @@ class TestDiagV2API(unittest.TestCase):
self.assertTrue(np.allclose(y.numpy(), self.expected11)) self.assertTrue(np.allclose(y.numpy(), self.expected11))
def run_static(self, use_gpu=False): def run_static(self, use_gpu=False):
x = paddle.data(name='input', shape=[10, 10], dtype='float32') x = paddle.fluid.data(name='input', shape=[10, 10], dtype='float32')
x2 = paddle.data(name='input2', shape=[100], dtype='float64') x2 = paddle.fluid.data(name='input2', shape=[100], dtype='float64')
x3 = paddle.data(name='input3', shape=[100], dtype='int64') x3 = paddle.fluid.data(name='input3', shape=[100], dtype='int64')
x4 = paddle.data(name='input4', shape=[2000, 2000], dtype='float32') x4 = paddle.fluid.data(name='input4', shape=[2000, 2000], dtype='float32')
x5 = paddle.data(name='input5', shape=[2000], dtype='float32') x5 = paddle.fluid.data(name='input5', shape=[2000], dtype='float32')
result0 = paddle.diag(x) result0 = paddle.diag(x)
result1 = paddle.diag(x, offset=1) result1 = paddle.diag(x, offset=1)
result2 = paddle.diag(x, offset=-1) result2 = paddle.diag(x, offset=-1)
......
...@@ -37,8 +37,7 @@ class TestDirectory(unittest.TestCase): ...@@ -37,8 +37,7 @@ class TestDirectory(unittest.TestCase):
new_directory = [ new_directory = [
'paddle.enable_static', 'paddle.disable_static', 'paddle.enable_static', 'paddle.disable_static',
'paddle.in_dynamic_mode', 'paddle.to_tensor', 'paddle.grad', 'paddle.in_dynamic_mode', 'paddle.to_tensor', 'paddle.grad',
'paddle.no_grad', 'paddle.save', 'paddle.load', 'paddle.no_grad', 'paddle.static.save', 'paddle.static.load',
'paddle.static.save', 'paddle.static.load',
'paddle.distributed.ParallelEnv', 'paddle.distributed.ParallelEnv',
'paddle.distributed.prepare_context', 'paddle.DataParallel', 'paddle.distributed.prepare_context', 'paddle.DataParallel',
'paddle.jit', 'paddle.jit.TracedLayer', 'paddle.jit.to_static', 'paddle.jit', 'paddle.jit.TracedLayer', 'paddle.jit.to_static',
......
...@@ -170,7 +170,7 @@ class TestFlatten2OpError(unittest.TestCase): ...@@ -170,7 +170,7 @@ class TestFlatten2OpError(unittest.TestCase):
x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
image_shape[3]).reshape(image_shape) / 100. image_shape[3]).reshape(image_shape) / 100.
x2 = x2.astype('float16') x2 = x2.astype('float16')
x2_var = paddle.data(name='x2', shape=[3, 2, 4, 5], dtype='float16') x2_var = paddle.fluid.data(name='x2', shape=[3, 2, 4, 5], dtype='float16')
paddle.flatten(x2_var) paddle.flatten(x2_var)
self.assertRaises(TypeError, test_type) self.assertRaises(TypeError, test_type)
......
...@@ -31,7 +31,7 @@ class TestFullOp(unittest.TestCase): ...@@ -31,7 +31,7 @@ class TestFullOp(unittest.TestCase):
train_program = Program() train_program = Program()
with program_guard(train_program, startup_program): with program_guard(train_program, startup_program):
fill_value = 2.0 fill_value = 2.0
input = paddle.data(name='input', dtype='float32', shape=[2, 3]) input = paddle.fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.full_like(input, fill_value) output = paddle.full_like(input, fill_value)
output_dtype = paddle.full_like(input, fill_value, dtype='float32') output_dtype = paddle.full_like(input, fill_value, dtype='float32')
...@@ -67,7 +67,7 @@ class TestFullOpError(unittest.TestCase): ...@@ -67,7 +67,7 @@ class TestFullOpError(unittest.TestCase):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
#for ci coverage #for ci coverage
input_data = paddle.data( input_data = paddle.fluid.data(
name='input', dtype='float32', shape=[2, 3]) name='input', dtype='float32', shape=[2, 3])
output = paddle.full_like(input_data, 2.0) output = paddle.full_like(input_data, 2.0)
......
...@@ -192,9 +192,9 @@ class TestGatherNdError(unittest.TestCase): ...@@ -192,9 +192,9 @@ class TestGatherNdError(unittest.TestCase):
paddle.static.Program()): paddle.static.Program()):
shape = [8, 9, 6] shape = [8, 9, 6]
x = paddle.data(shape=shape, dtype='float32', name='x') x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
index = paddle.data(shape=shape, dtype='bool', name='index') index = paddle.fluid.data(shape=shape, dtype='bool', name='index')
index_float = paddle.data( index_float = paddle.fluid.data(
shape=shape, dtype='float32', name='index_float') shape=shape, dtype='float32', name='index_float')
np_x = np.random.random(shape).astype('float32') np_x = np.random.random(shape).astype('float32')
np_index = np.array(np.random.randint(2, size=shape, dtype=bool)) np_index = np.array(np.random.randint(2, size=shape, dtype=bool))
......
...@@ -202,9 +202,9 @@ class API_TestGather(unittest.TestCase): ...@@ -202,9 +202,9 @@ class API_TestGather(unittest.TestCase):
def test_out2(self): def test_out2(self):
with paddle.static.program_guard(paddle.static.Program(), with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()): paddle.static.Program()):
x = paddle.data('x', shape=[-1, 2], dtype='float64') x = paddle.fluid.data('x', shape=[-1, 2], dtype='float64')
index = paddle.data('index', shape=[-1, 1], dtype='int32') index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32')
axis = paddle.data('axis', shape=[1], dtype='int32') axis = paddle.fluid.data('axis', shape=[1], dtype='int32')
out = paddle.gather(x, index, axis) out = paddle.gather(x, index, axis)
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
...@@ -252,10 +252,10 @@ class TestGathertError(unittest.TestCase): ...@@ -252,10 +252,10 @@ class TestGathertError(unittest.TestCase):
paddle.static.Program()): paddle.static.Program()):
shape = [8, 9, 6] shape = [8, 9, 6]
x = paddle.data(shape=shape, dtype='int8', name='x') x = paddle.fluid.data(shape=shape, dtype='int8', name='x')
axis = paddle.data(shape=[1], dtype='float32', name='axis') axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis')
index = paddle.data(shape=shape, dtype='int32', name='index') index = paddle.fluid.data(shape=shape, dtype='int32', name='index')
index_float = paddle.data( index_float = paddle.fluid.data(
shape=shape, dtype='float32', name='index_float') shape=shape, dtype='float32', name='index_float')
def test_x_type(): def test_x_type():
......
...@@ -73,7 +73,7 @@ class TestHistogramOpError(unittest.TestCase): ...@@ -73,7 +73,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test bins should be greater than or equal to 1.""" """Test bins should be greater than or equal to 1."""
def net_func(): def net_func():
input_value = paddle.fill_constant( input_value = paddle.fluid.layers.fill_constant(
shape=[3, 4], dtype='float32', value=3.0) shape=[3, 4], dtype='float32', value=3.0)
paddle.histogram(input=input_value, bins=-1, min=1, max=5) paddle.histogram(input=input_value, bins=-1, min=1, max=5)
...@@ -84,7 +84,7 @@ class TestHistogramOpError(unittest.TestCase): ...@@ -84,7 +84,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test max must be larger or equal to min.""" """Test max must be larger or equal to min."""
def net_func(): def net_func():
input_value = paddle.fill_constant( input_value = paddle.fluid.layers.fill_constant(
shape=[3, 4], dtype='float32', value=3.0) shape=[3, 4], dtype='float32', value=3.0)
paddle.histogram(input=input_value, bins=1, min=5, max=1) paddle.histogram(input=input_value, bins=1, min=5, max=1)
...@@ -95,7 +95,7 @@ class TestHistogramOpError(unittest.TestCase): ...@@ -95,7 +95,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test range of min, max is not finite""" """Test range of min, max is not finite"""
def net_func(): def net_func():
input_value = paddle.fill_constant( input_value = paddle.fluid.layers.fill_constant(
shape=[3, 4], dtype='float32', value=3.0) shape=[3, 4], dtype='float32', value=3.0)
paddle.histogram(input=input_value, bins=1, min=-np.inf, max=5) paddle.histogram(input=input_value, bins=1, min=-np.inf, max=5)
......
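The histogram error tests build their fixed input with fill_constant, which likewise moves back to its fluid.layers spelling. For contrast with the failure cases exercised above, a minimal valid call (bins >= 1 and min <= max; values illustrative):

    import paddle

    paddle.disable_static()
    # fill_constant is reached through fluid.layers after the alias removal
    x = paddle.fluid.layers.fill_constant(shape=[3, 4], dtype="float32", value=3.0)
    hist = paddle.histogram(input=x, bins=4, min=1, max=5)
    print(hist.numpy())  # all twelve values fall into the bin covering 3.0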
...@@ -31,11 +31,11 @@ class LeNetDygraph(fluid.dygraph.Layer): ...@@ -31,11 +31,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
nn.Conv2d( nn.Conv2d(
1, 6, 3, stride=1, padding=1), 1, 6, 3, stride=1, padding=1),
nn.ReLU(), nn.ReLU(),
nn.Pool2D(2, 'max', 2), paddle.fluid.dygraph.Pool2D(2, 'max', 2),
nn.Conv2d( nn.Conv2d(
6, 16, 5, stride=1, padding=0), 6, 16, 5, stride=1, padding=0),
nn.ReLU(), nn.ReLU(),
nn.Pool2D(2, 'max', 2)) paddle.fluid.dygraph.Pool2D(2, 'max', 2))
if num_classes > 0: if num_classes > 0:
self.fc = nn.Sequential( self.fc = nn.Sequential(
...@@ -54,17 +54,17 @@ class LeNetDygraph(fluid.dygraph.Layer): ...@@ -54,17 +54,17 @@ class LeNetDygraph(fluid.dygraph.Layer):
def init_weights(layer): def init_weights(layer):
if type(layer) == nn.Linear: if type(layer) == nn.Linear:
new_weight = paddle.fill_constant( new_weight = paddle.fluid.layers.fill_constant(
layer.weight.shape, layer.weight.dtype, value=0.9) layer.weight.shape, layer.weight.dtype, value=0.9)
layer.weight.set_value(new_weight) layer.weight.set_value(new_weight)
new_bias = paddle.fill_constant( new_bias = paddle.fluid.layers.fill_constant(
layer.bias.shape, layer.bias.dtype, value=-0.1) layer.bias.shape, layer.bias.dtype, value=-0.1)
layer.bias.set_value(new_bias) layer.bias.set_value(new_bias)
elif type(layer) == nn.Conv2d: elif type(layer) == nn.Conv2d:
new_weight = paddle.fill_constant( new_weight = paddle.fluid.layers.fill_constant(
layer.weight.shape, layer.weight.dtype, value=0.7) layer.weight.shape, layer.weight.dtype, value=0.7)
layer.weight.set_value(new_weight) layer.weight.set_value(new_weight)
new_bias = paddle.fill_constant( new_bias = paddle.fluid.layers.fill_constant(
layer.bias.shape, layer.bias.dtype, value=-0.2) layer.bias.shape, layer.bias.dtype, value=-0.2)
layer.bias.set_value(new_bias) layer.bias.set_value(new_bias)
......
...@@ -30,11 +30,11 @@ class LeNetDygraph(fluid.dygraph.Layer): ...@@ -30,11 +30,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
nn.Conv2d( nn.Conv2d(
1, 6, 3, stride=1, padding=1), 1, 6, 3, stride=1, padding=1),
nn.ReLU(), nn.ReLU(),
nn.Pool2D(2, 'max', 2), paddle.fluid.dygraph.Pool2D(2, 'max', 2),
nn.Conv2d( nn.Conv2d(
6, 16, 5, stride=1, padding=0), 6, 16, 5, stride=1, padding=0),
nn.ReLU(), nn.ReLU(),
nn.Pool2D(2, 'max', 2)) paddle.fluid.dygraph.Pool2D(2, 'max', 2))
def forward(self, inputs): def forward(self, inputs):
x = self.features(inputs) x = self.features(inputs)
......
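nn.Pool2D is removed from the 2.0 namespace, so both LeNet fixtures fall back to the original dygraph layer, which keeps the old positional (pool_size, pool_type, pool_stride) signature. A minimal sketch of the replacement inside a Sequential (channel sizes are illustrative):

    import paddle
    import paddle.nn as nn

    paddle.disable_static()
    features = nn.Sequential(
        nn.Conv2d(1, 6, 3, stride=1, padding=1),
        nn.ReLU(),
        # nn.Pool2D is gone; the dygraph layer remains available
        paddle.fluid.dygraph.Pool2D(2, 'max', 2))

    x = paddle.ones([1, 1, 28, 28])
    print(features(x).shape)  # [1, 6, 14, 14]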
...@@ -135,10 +135,10 @@ class BadInputTest(unittest.TestCase): ...@@ -135,10 +135,10 @@ class BadInputTest(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
data = paddle.zeros([2, 3]) data = paddle.zeros([2, 3])
result = paddle.has_inf(data) result = paddle.fluid.layers.has_inf(data)
expect_value = np.array([False]) expect_value = np.array([False])
self.assertEqual((result.numpy() == expect_value).all(), True) self.assertEqual((result.numpy() == expect_value).all(), True)
result = paddle.has_nan(data) result = paddle.fluid.layers.has_nan(data)
self.assertEqual((result.numpy() == expect_value).all(), True) self.assertEqual((result.numpy() == expect_value).all(), True)
......
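has_inf and has_nan are likewise no longer exported from the paddle root; the test reaches them through fluid.layers. A minimal dygraph sketch with an input that actually contains an inf:

    import numpy as np
    import paddle

    paddle.disable_static()
    data = paddle.to_tensor(np.array([1.0, float("inf")], dtype="float32"))
    # the root-level aliases were removed; the ops live in fluid.layers
    print(paddle.fluid.layers.has_inf(data).numpy())  # [ True]
    print(paddle.fluid.layers.has_nan(data).numpy())  # [False]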
...@@ -27,7 +27,7 @@ def run_static(x_np, dtype, op_str, use_gpu=False): ...@@ -27,7 +27,7 @@ def run_static(x_np, dtype, op_str, use_gpu=False):
place = paddle.CUDAPlace(0) place = paddle.CUDAPlace(0)
exe = fluid.Executor(place) exe = fluid.Executor(place)
with fluid.program_guard(main_program, startup_program): with fluid.program_guard(main_program, startup_program):
x = paddle.data(name='x', shape=x_np.shape, dtype=dtype) x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=dtype)
res = getattr(paddle.tensor, op_str)(x) res = getattr(paddle.tensor, op_str)(x)
exe.run(startup_program) exe.run(startup_program)
static_result = exe.run(main_program, static_result = exe.run(main_program,
......
...@@ -44,8 +44,8 @@ class TestFunctionalL1Loss(unittest.TestCase): ...@@ -44,8 +44,8 @@ class TestFunctionalL1Loss(unittest.TestCase):
self.assertTrue(dy_result.shape, [10, 10, 5]) self.assertTrue(dy_result.shape, [10, 10, 5])
def run_static(self, use_gpu=False): def run_static(self, use_gpu=False):
input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32') input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
label = paddle.data(name='label', shape=[10, 10, 5], dtype='float32') label = paddle.fluid.data(name='label', shape=[10, 10, 5], dtype='float32')
result0 = paddle.nn.functional.l1_loss(input, label) result0 = paddle.nn.functional.l1_loss(input, label)
result1 = paddle.nn.functional.l1_loss(input, label, reduction='sum') result1 = paddle.nn.functional.l1_loss(input, label, reduction='sum')
result2 = paddle.nn.functional.l1_loss(input, label, reduction='none') result2 = paddle.nn.functional.l1_loss(input, label, reduction='none')
...@@ -90,9 +90,9 @@ class TestFunctionalL1Loss(unittest.TestCase): ...@@ -90,9 +90,9 @@ class TestFunctionalL1Loss(unittest.TestCase):
# test case the raise message # test case the raise message
def test_errors(self): def test_errors(self):
def test_value_error(): def test_value_error():
input = paddle.data( input = paddle.fluid.data(
name='input', shape=[10, 10, 5], dtype='float32') name='input', shape=[10, 10, 5], dtype='float32')
label = paddle.data( label = paddle.fluid.data(
name='label', shape=[10, 10, 5], dtype='float32') name='label', shape=[10, 10, 5], dtype='float32')
loss = paddle.nn.functional.l1_loss( loss = paddle.nn.functional.l1_loss(
input, label, reduction='reduce_mean') input, label, reduction='reduce_mean')
...@@ -127,8 +127,8 @@ class TestClassL1Loss(unittest.TestCase): ...@@ -127,8 +127,8 @@ class TestClassL1Loss(unittest.TestCase):
self.assertTrue(dy_result.shape, [10, 10, 5]) self.assertTrue(dy_result.shape, [10, 10, 5])
def run_static(self, use_gpu=False): def run_static(self, use_gpu=False):
input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32') input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
label = paddle.data(name='label', shape=[10, 10, 5], dtype='float32') label = paddle.fluid.data(name='label', shape=[10, 10, 5], dtype='float32')
l1_loss = paddle.nn.loss.L1Loss() l1_loss = paddle.nn.loss.L1Loss()
result0 = l1_loss(input, label) result0 = l1_loss(input, label)
l1_loss = paddle.nn.loss.L1Loss(reduction='sum') l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
......
...@@ -327,7 +327,7 @@ class TestLayer(LayerTest): ...@@ -327,7 +327,7 @@ class TestLayer(LayerTest):
with self.dynamic_graph(): with self.dynamic_graph():
t = np.ones([3, 3, 5, 5], dtype='float32') t = np.ones([3, 3, 5, 5], dtype='float32')
my_pad2d = paddle.nn.Pad2D(paddings=1) my_pad2d = paddle.nn.layer.Pad2D(paddings=1)
dy_ret = my_pad2d(base.to_variable(t)) dy_ret = my_pad2d(base.to_variable(t))
dy_ret_value = dy_ret.numpy() dy_ret_value = dy_ret.numpy()
......
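Here the shortcut paddle.nn.Pad2D gives way to the concrete class path paddle.nn.layer.Pad2D. A minimal dygraph sketch of the padded shape (the paddings value is illustrative):

    import numpy as np
    import paddle

    paddle.disable_static()
    t = np.ones([3, 3, 5, 5], dtype="float32")
    # the nn.Pad2D shortcut was dropped; the class still lives in nn.layer
    pad2d = paddle.nn.layer.Pad2D(paddings=1)
    out = pad2d(paddle.to_tensor(t))
    print(out.shape)  # [3, 3, 7, 7]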
...@@ -88,7 +88,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): ...@@ -88,7 +88,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
logsoftmax = paddle.nn.LogSoftmax(axis) logsoftmax = paddle.nn.LogSoftmax(axis)
# test static api # test static api
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data(name='x', shape=self.x_shape) x = paddle.fluid.data(name='x', shape=self.x_shape)
y = logsoftmax(x) y = logsoftmax(x)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
out = exe.run(feed={'x': self.x}, fetch_list=[y]) out = exe.run(feed={'x': self.x}, fetch_list=[y])
...@@ -120,7 +120,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): ...@@ -120,7 +120,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
x = x.astype(dtype) x = x.astype(dtype)
ref_out = np.apply_along_axis(ref_log_softmax, axis, x) ref_out = np.apply_along_axis(ref_log_softmax, axis, x)
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data(name='x', shape=self.x_shape) x = paddle.fluid.data(name='x', shape=self.x_shape)
y = F.log_softmax(x, axis, dtype) y = F.log_softmax(x, axis, dtype)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
out = exe.run(feed={'x': self.x}, fetch_list=[y]) out = exe.run(feed={'x': self.x}, fetch_list=[y])
...@@ -139,10 +139,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): ...@@ -139,10 +139,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
def test_errors(self): def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data(name='X1', shape=[100], dtype='int32') x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
self.assertRaises(TypeError, F.log_softmax, x) self.assertRaises(TypeError, F.log_softmax, x)
x = paddle.data(name='X2', shape=[100], dtype='float32') x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
self.assertRaises(TypeError, F.log_softmax, x, dtype='int32') self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')
......
...@@ -90,7 +90,7 @@ class TestLogsumexpError(unittest.TestCase): ...@@ -90,7 +90,7 @@ class TestLogsumexpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
self.assertRaises(TypeError, paddle.logsumexp, 1) self.assertRaises(TypeError, paddle.logsumexp, 1)
x1 = paddle.data(name='x1', shape=[120], dtype="int32") x1 = paddle.fluid.data(name='x1', shape=[120], dtype="int32")
self.assertRaises(TypeError, paddle.logsumexp, x1) self.assertRaises(TypeError, paddle.logsumexp, x1)
...@@ -104,7 +104,7 @@ class TestLogsumexpAPI(unittest.TestCase): ...@@ -104,7 +104,7 @@ class TestLogsumexpAPI(unittest.TestCase):
def api_case(self, axis=None, keepdim=False): def api_case(self, axis=None, keepdim=False):
out_ref = ref_logsumexp(self.x, axis, keepdim) out_ref = ref_logsumexp(self.x, axis, keepdim)
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.shape) x = paddle.fluid.data('X', self.shape)
out = paddle.logsumexp(x, axis, keepdim) out = paddle.logsumexp(x, axis, keepdim)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x}, fetch_list=[out]) res = exe.run(feed={'X': self.x}, fetch_list=[out])
......
...@@ -414,7 +414,7 @@ class TestLRScheduler(unittest.TestCase): ...@@ -414,7 +414,7 @@ class TestLRScheduler(unittest.TestCase):
for batch_id in range(2): for batch_id in range(2):
x = paddle.to_tensor(x) x = paddle.to_tensor(x)
out = linear(x) out = linear(x)
loss = paddle.reduce_mean(out) loss = paddle.fluid.layers.reduce_mean(out)
loss.backward() loss.backward()
adam.step() adam.step()
adam.clear_grad() adam.clear_grad()
......
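The scheduler test now computes its loss with the raw fluid reduce op once the paddle.reduce_mean alias is gone. A condensed sketch of the surrounding dygraph training step (layer sizes are illustrative):

    import numpy as np
    import paddle

    paddle.disable_static()
    linear = paddle.nn.Linear(10, 10)
    adam = paddle.optimizer.Adam(learning_rate=0.01,
                                 parameters=linear.parameters())

    x = paddle.to_tensor(np.random.rand(4, 10).astype("float32"))
    out = linear(x)
    # paddle.reduce_mean was removed; fluid.layers.reduce_mean is the raw op
    loss = paddle.fluid.layers.reduce_mean(out)
    loss.backward()
    adam.step()
    adam.clear_grad()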
...@@ -74,8 +74,8 @@ class TestMaskedSelectAPI(unittest.TestCase): ...@@ -74,8 +74,8 @@ class TestMaskedSelectAPI(unittest.TestCase):
def test_static_mode(self): def test_static_mode(self):
shape = [8, 9, 6] shape = [8, 9, 6]
x = paddle.data(shape=shape, dtype='float32', name='x') x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
mask = paddle.data(shape=shape, dtype='bool', name='mask') mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
np_x = np.random.random(shape).astype('float32') np_x = np.random.random(shape).astype('float32')
np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
...@@ -97,9 +97,9 @@ class TestMaskedSelectError(unittest.TestCase): ...@@ -97,9 +97,9 @@ class TestMaskedSelectError(unittest.TestCase):
paddle.static.Program()): paddle.static.Program()):
shape = [8, 9, 6] shape = [8, 9, 6]
x = paddle.data(shape=shape, dtype='float32', name='x') x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
mask = paddle.data(shape=shape, dtype='bool', name='mask') mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
mask_float = paddle.data( mask_float = paddle.fluid.data(
shape=shape, dtype='float32', name='mask_float') shape=shape, dtype='float32', name='mask_float')
np_x = np.random.random(shape).astype('float32') np_x = np.random.random(shape).astype('float32')
np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
......
...@@ -473,12 +473,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase): ...@@ -473,12 +473,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
# 3. Bool tensor operation # 3. Bool tensor operation
x = paddle.to_tensor([[True, False], [True, False]]) x = paddle.to_tensor([[True, False], [True, False]])
y = paddle.to_tensor([[False, False], [False, True]]) y = paddle.to_tensor([[False, False], [False, True]])
self.assertTrue(
np.array_equal(x.reduce_all().numpy(), paddle.reduce_all(x).numpy(
)))
self.assertTrue(
np.array_equal(x.reduce_any().numpy(), paddle.reduce_any(x).numpy(
)))
self.assertTrue( self.assertTrue(
np.array_equal( np.array_equal(
x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy())) x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
...@@ -501,18 +495,9 @@ class TestMathOpPatchesVarBase(unittest.TestCase): ...@@ -501,18 +495,9 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
x.where(a, b).numpy(), paddle.where(x, a, b).numpy())) x.where(a, b).numpy(), paddle.where(x, a, b).numpy()))
self.assertTrue(inspect.ismethod(a.dot)) self.assertTrue(inspect.ismethod(a.dot))
self.assertTrue(inspect.ismethod(a.elementwise_add))
self.assertTrue(inspect.ismethod(a.elementwise_div))
self.assertTrue(inspect.ismethod(a.elementwise_floordiv))
self.assertTrue(inspect.ismethod(a.elementwise_mod))
self.assertTrue(inspect.ismethod(a.elementwise_sub))
self.assertTrue(inspect.ismethod(a.logsumexp)) self.assertTrue(inspect.ismethod(a.logsumexp))
self.assertTrue(inspect.ismethod(a.multiplex)) self.assertTrue(inspect.ismethod(a.multiplex))
self.assertTrue(inspect.ismethod(a.prod)) self.assertTrue(inspect.ismethod(a.prod))
self.assertTrue(inspect.ismethod(a.reduce_max))
self.assertTrue(inspect.ismethod(a.reduce_min))
self.assertTrue(inspect.ismethod(a.reduce_prod))
self.assertTrue(inspect.ismethod(a.reduce_sum))
self.assertTrue(inspect.ismethod(a.scale)) self.assertTrue(inspect.ismethod(a.scale))
self.assertTrue(inspect.ismethod(a.stanh)) self.assertTrue(inspect.ismethod(a.stanh))
self.assertTrue(inspect.ismethod(a.add_n)) self.assertTrue(inspect.ismethod(a.add_n))
...@@ -528,7 +513,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase): ...@@ -528,7 +513,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertTrue(inspect.ismethod(a.inverse)) self.assertTrue(inspect.ismethod(a.inverse))
self.assertTrue(inspect.ismethod(a.log1p)) self.assertTrue(inspect.ismethod(a.log1p))
self.assertTrue(inspect.ismethod(a.erf)) self.assertTrue(inspect.ismethod(a.erf))
self.assertTrue(inspect.ismethod(a.addcmul))
self.assertTrue(inspect.ismethod(a.addmm)) self.assertTrue(inspect.ismethod(a.addmm))
self.assertTrue(inspect.ismethod(a.clip)) self.assertTrue(inspect.ismethod(a.clip))
self.assertTrue(inspect.ismethod(a.trace)) self.assertTrue(inspect.ismethod(a.trace))
...@@ -548,8 +532,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase): ...@@ -548,8 +532,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertTrue(inspect.ismethod(a.argmax)) self.assertTrue(inspect.ismethod(a.argmax))
self.assertTrue(inspect.ismethod(a.argmin)) self.assertTrue(inspect.ismethod(a.argmin))
self.assertTrue(inspect.ismethod(a.argsort)) self.assertTrue(inspect.ismethod(a.argsort))
self.assertTrue(inspect.ismethod(a.has_inf))
self.assertTrue(inspect.ismethod(a.has_nan))
self.assertTrue(inspect.ismethod(a.masked_select)) self.assertTrue(inspect.ismethod(a.masked_select))
self.assertTrue(inspect.ismethod(a.topk)) self.assertTrue(inspect.ismethod(a.topk))
self.assertTrue(inspect.ismethod(a.index_select)) self.assertTrue(inspect.ismethod(a.index_select))
...@@ -557,7 +539,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase): ...@@ -557,7 +539,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertTrue(inspect.ismethod(a.sort)) self.assertTrue(inspect.ismethod(a.sort))
self.assertTrue(inspect.ismethod(a.index_sample)) self.assertTrue(inspect.ismethod(a.index_sample))
self.assertTrue(inspect.ismethod(a.mean)) self.assertTrue(inspect.ismethod(a.mean))
self.assertTrue(inspect.ismethod(a.reduce_mean))
self.assertTrue(inspect.ismethod(a.std)) self.assertTrue(inspect.ismethod(a.std))
self.assertTrue(inspect.ismethod(a.numel)) self.assertTrue(inspect.ismethod(a.numel))
......
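The deletions in this test mirror the same cleanup on the Tensor method surface: reduce_all, reduce_any, the elementwise_* helpers, addcmul, has_inf/has_nan and reduce_mean are no longer monkey-patched onto VarBase. A minimal sketch of the spelling that still works for the two boolean reductions (values are illustrative):

    import paddle

    paddle.disable_static()
    x = paddle.to_tensor([[True, False], [True, False]])
    # the x.reduce_all()/x.reduce_any() methods are gone; the raw ops remain
    print(paddle.fluid.layers.reduce_all(x).numpy())  # False: not every entry is True
    print(paddle.fluid.layers.reduce_any(x).numpy())  # True: at least one entry is True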
...@@ -92,7 +92,7 @@ class TestMaxoutAPI(unittest.TestCase): ...@@ -92,7 +92,7 @@ class TestMaxoutAPI(unittest.TestCase):
def test_static_api(self): def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype) x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.maxout(x, self.groups, self.axis) out1 = F.maxout(x, self.groups, self.axis)
m = paddle.nn.Maxout(self.groups, self.axis) m = paddle.nn.Maxout(self.groups, self.axis)
out2 = m(x) out2 = m(x)
...@@ -137,11 +137,11 @@ class TestMaxoutAPI(unittest.TestCase): ...@@ -137,11 +137,11 @@ class TestMaxoutAPI(unittest.TestCase):
# The input type must be Variable. # The input type must be Variable.
self.assertRaises(TypeError, F.maxout, 1) self.assertRaises(TypeError, F.maxout, 1)
# The input dtype must be float16, float32, float64. # The input dtype must be float16, float32, float64.
x_int32 = paddle.data( x_int32 = paddle.fluid.data(
name='x_int32', shape=[2, 4, 6, 8], dtype='int32') name='x_int32', shape=[2, 4, 6, 8], dtype='int32')
self.assertRaises(TypeError, F.maxout, x_int32) self.assertRaises(TypeError, F.maxout, x_int32)
x_float32 = paddle.data(name='x_float32', shape=[2, 4, 6, 8]) x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
self.assertRaises(ValueError, F.maxout, x_float32, 2, 2) self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
......
...@@ -185,7 +185,7 @@ class TestMeanAPI(unittest.TestCase): ...@@ -185,7 +185,7 @@ class TestMeanAPI(unittest.TestCase):
def test_api_static(self): def test_api_static(self):
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_shape) x = paddle.fluid.data('X', self.x_shape)
out1 = paddle.mean(x) out1 = paddle.mean(x)
out2 = paddle.tensor.mean(x) out2 = paddle.tensor.mean(x)
out3 = paddle.tensor.stat.mean(x) out3 = paddle.tensor.stat.mean(x)
...@@ -249,7 +249,7 @@ class TestMeanAPI(unittest.TestCase): ...@@ -249,7 +249,7 @@ class TestMeanAPI(unittest.TestCase):
self.assertRaises(Exception, paddle.mean, x, 2) self.assertRaises(Exception, paddle.mean, x, 2)
paddle.enable_static() paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12], 'int32') x = paddle.fluid.data('X', [10, 12], 'int32')
self.assertRaises(TypeError, paddle.mean, x) self.assertRaises(TypeError, paddle.mean, x)
......
...@@ -191,8 +191,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -191,8 +191,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace() ) else paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=dim, dtype='float32') input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
target = paddle.data(name='target', shape=dim, dtype='float32') target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean') mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
...@@ -225,8 +225,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -225,8 +225,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace() ) else paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=dim, dtype='float32') input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
target = paddle.data(name='target', shape=dim, dtype='float32') target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum') mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
...@@ -259,8 +259,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -259,8 +259,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace() ) else paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=dim, dtype='float32') input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
target = paddle.data(name='target', shape=dim, dtype='float32') target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
mse_loss = paddle.nn.functional.mse_loss(input, target, 'none') mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
......
...@@ -884,8 +884,8 @@ class TestNLLLossName(unittest.TestCase): ...@@ -884,8 +884,8 @@ class TestNLLLossName(unittest.TestCase):
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
place = paddle.CPUPlace() place = paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=[10, 10], dtype='float64') x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.data(name='label', shape=[10], dtype='int64') label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss') nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss')
res = nll_loss(x, label) res = nll_loss(x, label)
self.assertTrue(res.name.startswith('nll_loss')) self.assertTrue(res.name.startswith('nll_loss'))
...@@ -898,8 +898,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase): ...@@ -898,8 +898,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
place = paddle.CPUPlace() place = paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=[10, ], dtype='float64') x = paddle.fluid.data(name='x', shape=[10, ], dtype='float64')
label = paddle.data(name='label', shape=[10, ], dtype='float64') label = paddle.fluid.data(name='label', shape=[10, ], dtype='float64')
nll_loss = paddle.nn.loss.NLLLoss() nll_loss = paddle.nn.loss.NLLLoss()
res = nll_loss(x, label) res = nll_loss(x, label)
...@@ -922,8 +922,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase): ...@@ -922,8 +922,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
place = paddle.CPUPlace() place = paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=[10, 10], dtype='float64') x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.data(name='label', shape=[10], dtype='int64') label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
nll_loss = paddle.nn.loss.NLLLoss(reduction='') nll_loss = paddle.nn.loss.NLLLoss(reduction='')
res = nll_loss(x, label) res = nll_loss(x, label)
...@@ -946,8 +946,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase): ...@@ -946,8 +946,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
place = paddle.CPUPlace() place = paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=[10, 10], dtype='float64') x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.data(name='label', shape=[10], dtype='int64') label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
res = paddle.nn.functional.nll_loss(x, label, reduction='') res = paddle.nn.functional.nll_loss(x, label, reduction='')
self.assertRaises(ValueError, self.assertRaises(ValueError,
......
...@@ -61,8 +61,8 @@ class TestNormalAPI(unittest.TestCase): ...@@ -61,8 +61,8 @@ class TestNormalAPI(unittest.TestCase):
if isinstance(self.mean, np.ndarray) \ if isinstance(self.mean, np.ndarray) \
and isinstance(self.std, np.ndarray): and isinstance(self.std, np.ndarray):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
mean = paddle.data('Mean', self.mean.shape, self.mean.dtype) mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype)
std = paddle.data('Std', self.std.shape, self.std.dtype) std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
out = paddle.normal(mean, std, self.shape) out = paddle.normal(mean, std, self.shape)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
...@@ -76,7 +76,7 @@ class TestNormalAPI(unittest.TestCase): ...@@ -76,7 +76,7 @@ class TestNormalAPI(unittest.TestCase):
return ret_all return ret_all
elif isinstance(self.mean, np.ndarray): elif isinstance(self.mean, np.ndarray):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
mean = paddle.data('Mean', self.mean.shape, self.mean.dtype) mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype)
out = paddle.normal(mean, self.std, self.shape) out = paddle.normal(mean, self.std, self.shape)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
...@@ -86,7 +86,7 @@ class TestNormalAPI(unittest.TestCase): ...@@ -86,7 +86,7 @@ class TestNormalAPI(unittest.TestCase):
return ret_all return ret_all
elif isinstance(self.std, np.ndarray): elif isinstance(self.std, np.ndarray):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
std = paddle.data('Std', self.std.shape, self.std.dtype) std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
out = paddle.normal(self.mean, std, self.shape) out = paddle.normal(self.mean, std, self.shape)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
...@@ -180,17 +180,17 @@ class TestNormalErrors(unittest.TestCase): ...@@ -180,17 +180,17 @@ class TestNormalErrors(unittest.TestCase):
std = [1, 2, 3] std = [1, 2, 3]
self.assertRaises(TypeError, paddle.normal, std=std) self.assertRaises(TypeError, paddle.normal, std=std)
mean = paddle.data('Mean', [100], 'int32') mean = paddle.fluid.data('Mean', [100], 'int32')
self.assertRaises(TypeError, paddle.normal, mean) self.assertRaises(TypeError, paddle.normal, mean)
std = paddle.data('Std', [100], 'int32') std = paddle.fluid.data('Std', [100], 'int32')
self.assertRaises(TypeError, paddle.normal, mean=1.0, std=std) self.assertRaises(TypeError, paddle.normal, mean=1.0, std=std)
self.assertRaises(TypeError, paddle.normal, shape=1) self.assertRaises(TypeError, paddle.normal, shape=1)
self.assertRaises(TypeError, paddle.normal, shape=[1.0]) self.assertRaises(TypeError, paddle.normal, shape=[1.0])
shape = paddle.data('Shape', [100], 'float32') shape = paddle.fluid.data('Shape', [100], 'float32')
self.assertRaises(TypeError, paddle.normal, shape=shape) self.assertRaises(TypeError, paddle.normal, shape=shape)
......
...@@ -56,8 +56,8 @@ class TestNNFunctionalNormalize(unittest.TestCase): ...@@ -56,8 +56,8 @@ class TestNNFunctionalNormalize(unittest.TestCase):
self.assertRaises(BaseException, F.normalize, x) self.assertRaises(BaseException, F.normalize, x)
def run_static(self, use_gpu=False): def run_static(self, use_gpu=False):
x = paddle.data(name='input', shape=[10, 10], dtype='float32') x = paddle.fluid.data(name='input', shape=[10, 10], dtype='float32')
x2 = paddle.data(name='input2', shape=[2], dtype='float32') x2 = paddle.fluid.data(name='input2', shape=[2], dtype='float32')
result0 = F.normalize(x) result0 = F.normalize(x)
result1 = F.normalize(x, p=1.5) result1 = F.normalize(x, p=1.5)
result2 = F.normalize(x, axis=0) result2 = F.normalize(x, axis=0)
......
...@@ -55,8 +55,8 @@ class TestNumelOoAPI(unittest.TestCase): ...@@ -55,8 +55,8 @@ class TestNumelOoAPI(unittest.TestCase):
with fluid.program_guard(main_program, startup_program): with fluid.program_guard(main_program, startup_program):
shape1 = [2, 1, 4, 5] shape1 = [2, 1, 4, 5]
shape2 = [1, 4, 5] shape2 = [1, 4, 5]
x_1 = paddle.data(shape=shape1, dtype='int32', name='x_1') x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1')
x_2 = paddle.data(shape=shape2, dtype='int32', name='x_2') x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2')
input_1 = np.random.random(shape1).astype("int32") input_1 = np.random.random(shape1).astype("int32")
input_2 = np.random.random(shape2).astype("int32") input_2 = np.random.random(shape2).astype("int32")
out_1 = paddle.numel(x_1) out_1 = paddle.numel(x_1)
......
...@@ -25,7 +25,7 @@ from paddle.fluid import core, Program, program_guard ...@@ -25,7 +25,7 @@ from paddle.fluid import core, Program, program_guard
class TestOnesLikeAPIError(unittest.TestCase): class TestOnesLikeAPIError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x = paddle.data('x', [3, 4]) x = paddle.fluid.data('x', [3, 4])
self.assertRaises(TypeError, ones_like, x, 'int8') self.assertRaises(TypeError, ones_like, x, 'int8')
...@@ -35,7 +35,7 @@ class TestOnesLikeAPI(unittest.TestCase): ...@@ -35,7 +35,7 @@ class TestOnesLikeAPI(unittest.TestCase):
startup_program = Program() startup_program = Program()
train_program = Program() train_program = Program()
with program_guard(train_program, startup_program): with program_guard(train_program, startup_program):
x = paddle.data('X', shape) x = paddle.fluid.data('X', shape)
# 'bool', 'float32', 'float64', 'int32', 'int64' # 'bool', 'float32', 'float64', 'int32', 'int64'
out1 = ones_like(x) out1 = ones_like(x)
......
...@@ -165,7 +165,7 @@ class TestPadAPI(unittest.TestCase): ...@@ -165,7 +165,7 @@ class TestPadAPI(unittest.TestCase):
mode = "constant" mode = "constant"
value = 100 value = 100
input_data = np.random.rand(*input_shape).astype(np.float32) input_data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape) x = paddle.fluid.data(name="x", shape=input_shape)
result = F.pad(x=x, result = F.pad(x=x,
pad=pad, pad=pad,
value=value, value=value,
...@@ -186,7 +186,7 @@ class TestPadAPI(unittest.TestCase): ...@@ -186,7 +186,7 @@ class TestPadAPI(unittest.TestCase):
pad = [1, 2, 1, 1, 1, 2] pad = [1, 2, 1, 1, 1, 2]
mode = "reflect" mode = "reflect"
input_data = np.random.rand(*input_shape).astype(np.float32) input_data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape) x = paddle.fluid.data(name="x", shape=input_shape)
result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
exe = Executor(place) exe = Executor(place)
...@@ -208,7 +208,7 @@ class TestPadAPI(unittest.TestCase): ...@@ -208,7 +208,7 @@ class TestPadAPI(unittest.TestCase):
pad = [1, 2, 1, 1, 3, 4] pad = [1, 2, 1, 1, 3, 4]
mode = "replicate" mode = "replicate"
input_data = np.random.rand(*input_shape).astype(np.float32) input_data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape) x = paddle.fluid.data(name="x", shape=input_shape)
result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
exe = Executor(place) exe = Executor(place)
...@@ -230,7 +230,7 @@ class TestPadAPI(unittest.TestCase): ...@@ -230,7 +230,7 @@ class TestPadAPI(unittest.TestCase):
pad = [1, 2, 1, 1, 3, 4] pad = [1, 2, 1, 1, 3, 4]
mode = "circular" mode = "circular"
input_data = np.random.rand(*input_shape).astype(np.float32) input_data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape) x = paddle.fluid.data(name="x", shape=input_shape)
result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
exe = Executor(place) exe = Executor(place)
...@@ -637,7 +637,7 @@ class TestPad3dOpError(unittest.TestCase): ...@@ -637,7 +637,7 @@ class TestPad3dOpError(unittest.TestCase):
def test_reflect_1(): def test_reflect_1():
input_shape = (1, 2, 3, 4, 5) input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(np.float32) data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape) x = paddle.fluid.data(name="x", shape=input_shape)
y = F.pad(x, pad=[5, 6, 1, 1, 1, 1], value=1, mode='reflect') y = F.pad(x, pad=[5, 6, 1, 1, 1, 1], value=1, mode='reflect')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = Executor(place) exe = Executor(place)
...@@ -646,7 +646,7 @@ class TestPad3dOpError(unittest.TestCase): ...@@ -646,7 +646,7 @@ class TestPad3dOpError(unittest.TestCase):
def test_reflect_2(): def test_reflect_2():
input_shape = (1, 2, 3, 4, 5) input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(np.float32) data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape) x = paddle.fluid.data(name="x", shape=input_shape)
y = F.pad(x, pad=[1, 1, 4, 3, 1, 1], value=1, mode='reflect') y = F.pad(x, pad=[1, 1, 4, 3, 1, 1], value=1, mode='reflect')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = Executor(place) exe = Executor(place)
...@@ -655,7 +655,7 @@ class TestPad3dOpError(unittest.TestCase): ...@@ -655,7 +655,7 @@ class TestPad3dOpError(unittest.TestCase):
def test_reflect_3(): def test_reflect_3():
input_shape = (1, 2, 3, 4, 5) input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(np.float32) data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape) x = paddle.fluid.data(name="x", shape=input_shape)
y = F.pad(x, pad=[1, 1, 1, 1, 2, 3], value=1, mode='reflect') y = F.pad(x, pad=[1, 1, 1, 1, 2, 3], value=1, mode='reflect')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = Executor(place) exe = Executor(place)
......
...@@ -32,8 +32,8 @@ def test_static(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False): ...@@ -32,8 +32,8 @@ def test_static(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
) else fluid.CPUPlace() ) else fluid.CPUPlace()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=x_np.shape, dtype=x_np.dtype) x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = paddle.data(name='y', shape=y_np.shape, dtype=x_np.dtype) y = paddle.fluid.data(name='y', shape=y_np.shape, dtype=x_np.dtype)
dist = paddle.nn.layer.distance.PairwiseDistance( dist = paddle.nn.layer.distance.PairwiseDistance(
p=p, epsilon=epsilon, keepdim=keepdim) p=p, epsilon=epsilon, keepdim=keepdim)
distance = dist(x, y) distance = dist(x, y)
......
...@@ -97,8 +97,8 @@ class TestPixelShuffleAPI(unittest.TestCase): ...@@ -97,8 +97,8 @@ class TestPixelShuffleAPI(unittest.TestCase):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x_1 = paddle.data(name="x", shape=[2, 9, 4, 4], dtype="float64") x_1 = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
x_2 = paddle.data(name="x2", shape=[2, 4, 4, 9], dtype="float64") x_2 = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
out_1 = F.pixel_shuffle(x_1, 3) out_1 = F.pixel_shuffle(x_1, 3)
out_2 = F.pixel_shuffle(x_2, 3, "NHWC") out_2 = F.pixel_shuffle(x_2, 3, "NHWC")
...@@ -123,8 +123,8 @@ class TestPixelShuffleAPI(unittest.TestCase): ...@@ -123,8 +123,8 @@ class TestPixelShuffleAPI(unittest.TestCase):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static() paddle.enable_static()
x_1 = paddle.data(name="x", shape=[2, 9, 4, 4], dtype="float64") x_1 = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
x_2 = paddle.data(name="x2", shape=[2, 4, 4, 9], dtype="float64") x_2 = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
# init instance # init instance
ps_1 = paddle.nn.PixelShuffle(3) ps_1 = paddle.nn.PixelShuffle(3)
ps_2 = paddle.nn.PixelShuffle(3, "NHWC") ps_2 = paddle.nn.PixelShuffle(3, "NHWC")
......
...@@ -49,8 +49,8 @@ class TestFunctionalPReluAPI(unittest.TestCase): ...@@ -49,8 +49,8 @@ class TestFunctionalPReluAPI(unittest.TestCase):
def static_check(self, weight_np): def static_check(self, weight_np):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, 'float32') x = paddle.fluid.data('X', self.x_np.shape, 'float32')
weight = paddle.data('Alpha', weight_np.shape, 'float32') weight = paddle.fluid.data('Alpha', weight_np.shape, 'float32')
out = F.prelu(x, weight) out = F.prelu(x, weight)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np, res = exe.run(feed={'X': self.x_np,
...@@ -78,15 +78,15 @@ class TestFunctionalPReluAPI(unittest.TestCase): ...@@ -78,15 +78,15 @@ class TestFunctionalPReluAPI(unittest.TestCase):
def test_error(self): def test_error(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
weight_fp32 = paddle.data( weight_fp32 = paddle.fluid.data(
name='weight_fp32', shape=[1], dtype='float32') name='weight_fp32', shape=[1], dtype='float32')
# The input type must be Variable. # The input type must be Variable.
self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32) self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32)
# The input dtype must be float16, float32, float64. # The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[2, 3], dtype='int32') x_int32 = paddle.fluid.data(name='x_int32', shape=[2, 3], dtype='int32')
self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32) self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32)
# support the input dtype is float16 # support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[2, 3], dtype='float16') x_fp16 = paddle.fluid.data(name='x_fp16', shape=[2, 3], dtype='float16')
F.prelu(x=x_fp16, weight=weight_fp32) F.prelu(x=x_fp16, weight=weight_fp32)
...@@ -100,7 +100,7 @@ class TestNNPReluAPI(unittest.TestCase): ...@@ -100,7 +100,7 @@ class TestNNPReluAPI(unittest.TestCase):
startup_program = paddle.static.Program() startup_program = paddle.static.Program()
train_program = paddle.static.Program() train_program = paddle.static.Program()
with paddle.static.program_guard(train_program, startup_program): with paddle.static.program_guard(train_program, startup_program):
x = paddle.data(name='X', shape=self.x_np.shape, dtype='float32') x = paddle.fluid.data(name='X', shape=self.x_np.shape, dtype='float32')
m = paddle.nn.PReLU() m = paddle.nn.PReLU()
out = m(x) out = m(x)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
......
...@@ -55,7 +55,7 @@ class TestProdOp(unittest.TestCase): ...@@ -55,7 +55,7 @@ class TestProdOp(unittest.TestCase):
self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
def run_static(self, use_gpu=False): def run_static(self, use_gpu=False):
input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32') input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
result0 = paddle.prod(input) result0 = paddle.prod(input)
result1 = paddle.prod(input, axis=1) result1 = paddle.prod(input, axis=1)
result2 = paddle.prod(input, axis=-1) result2 = paddle.prod(input, axis=-1)
...@@ -113,8 +113,8 @@ class TestProdOpError(unittest.TestCase): ...@@ -113,8 +113,8 @@ class TestProdOpError(unittest.TestCase):
def test_error(self): def test_error(self):
with paddle.static.program_guard(paddle.static.Program(), with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()): paddle.static.Program()):
x = paddle.data(name='x', shape=[2, 2, 4], dtype='float32') x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32')
bool_x = paddle.data(name='bool_x', shape=[2, 2, 4], dtype='bool') bool_x = paddle.fluid.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
# The argument x should be a Tensor # The argument x should be a Tensor
self.assertRaises(TypeError, paddle.prod, [1]) self.assertRaises(TypeError, paddle.prod, [1])
......
...@@ -125,8 +125,8 @@ class TestRandintAPI(unittest.TestCase): ...@@ -125,8 +125,8 @@ class TestRandintAPI(unittest.TestCase):
out3 = paddle.randint( out3 = paddle.randint(
low=-100, high=100, shape=(32, 32, 3), dtype='int64') low=-100, high=100, shape=(32, 32, 3), dtype='int64')
# shape is a tensorlist and dtype is 'float32' # shape is a tensorlist and dtype is 'float32'
dim_1 = paddle.fill_constant([1], "int64", 32) dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32)
dim_2 = paddle.fill_constant([1], "int32", 50) dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
out4 = paddle.randint( out4 = paddle.randint(
low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32') low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32')
# shape is a tensor and dtype is 'float64' # shape is a tensor and dtype is 'float64'
......
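paddle.fill_constant is de-aliased the same way; the fluid layer still accepts a Tensor as a shape entry. A sketch with illustrative values:

    import paddle
    paddle.enable_static()
    # was: dim_1 = paddle.fill_constant([1], "int64", 32)
    dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32)
    out = paddle.randint(low=-100, high=100, shape=[dim_1, 5], dtype='int32')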
...@@ -30,8 +30,8 @@ class TestRandnOp(unittest.TestCase): ...@@ -30,8 +30,8 @@ class TestRandnOp(unittest.TestCase):
x1 = paddle.randn(shape, 'float32') x1 = paddle.randn(shape, 'float32')
x2 = paddle.randn(shape, 'float64') x2 = paddle.randn(shape, 'float64')
dim_1 = paddle.fill_constant([1], "int64", 20) dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
dim_2 = paddle.fill_constant([1], "int32", 50) dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
x3 = paddle.randn([dim_1, dim_2, 784]) x3 = paddle.randn([dim_1, dim_2, 784])
var_shape = paddle.static.data('X', [2], 'int32') var_shape = paddle.static.data('X', [2], 'int32')
...@@ -59,8 +59,8 @@ class TestRandnOpForDygraph(unittest.TestCase): ...@@ -59,8 +59,8 @@ class TestRandnOpForDygraph(unittest.TestCase):
x1 = paddle.randn(shape, 'float32') x1 = paddle.randn(shape, 'float32')
x2 = paddle.randn(shape, 'float64') x2 = paddle.randn(shape, 'float64')
dim_1 = paddle.fill_constant([1], "int64", 20) dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
dim_2 = paddle.fill_constant([1], "int32", 50) dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
x3 = paddle.randn(shape=[dim_1, dim_2, 784]) x3 = paddle.randn(shape=[dim_1, dim_2, 784])
var_shape = paddle.to_tensor(np.array(shape)) var_shape = paddle.to_tensor(np.array(shape))
......
...@@ -229,8 +229,8 @@ class TestReshapeUint8Op(TestReshapeInt8Op): ...@@ -229,8 +229,8 @@ class TestReshapeUint8Op(TestReshapeInt8Op):
# Test python API # Test python API
class TestReshapeAPI(unittest.TestCase): class TestReshapeAPI(unittest.TestCase):
def _set_paddle_api(self): def _set_paddle_api(self):
self.fill_constant = paddle.fill_constant self.fill_constant = paddle.fluid.layers.fill_constant
self.data = paddle.data self.data = paddle.fluid.data
self.reshape = paddle.reshape self.reshape = paddle.reshape
self.to_tensor = paddle.to_tensor self.to_tensor = paddle.to_tensor
...@@ -305,7 +305,7 @@ class TestReshapeAPI(unittest.TestCase): ...@@ -305,7 +305,7 @@ class TestReshapeAPI(unittest.TestCase):
# Test Input Error # Test Input Error
class TestReshapeOpError(unittest.TestCase): class TestReshapeOpError(unittest.TestCase):
def _set_paddle_api(self): def _set_paddle_api(self):
self.data = paddle.data self.data = paddle.fluid.data
self.reshape = paddle.reshape self.reshape = paddle.reshape
def _set_fluid_api(self): def _set_fluid_api(self):
......
...@@ -73,7 +73,7 @@ class TestRetainGraph(unittest.TestCase): ...@@ -73,7 +73,7 @@ class TestRetainGraph(unittest.TestCase):
fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1) fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
disc_interpolates = netD(fake_AB) disc_interpolates = netD(fake_AB)
outs = paddle.fill_constant(disc_interpolates.shape, outs = paddle.fluid.layers.fill_constant(disc_interpolates.shape,
disc_interpolates.dtype, 1.0) disc_interpolates.dtype, 1.0)
gradients = paddle.grad( gradients = paddle.grad(
outputs=disc_interpolates, outputs=disc_interpolates,
...@@ -85,7 +85,7 @@ class TestRetainGraph(unittest.TestCase): ...@@ -85,7 +85,7 @@ class TestRetainGraph(unittest.TestCase):
gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1]) gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1])
gradient_penalty = paddle.reduce_mean((paddle.norm( gradient_penalty = paddle.fluid.layers.reduce_mean((paddle.norm(
gradients + 1e-16, 2, 1) - constant)** gradients + 1e-16, 2, 1) - constant)**
2) * lambda_gp # added eps 2) * lambda_gp # added eps
return gradient_penalty, gradients return gradient_penalty, gradients
...@@ -113,7 +113,7 @@ class TestRetainGraph(unittest.TestCase): ...@@ -113,7 +113,7 @@ class TestRetainGraph(unittest.TestCase):
fake_AB = paddle.concat((realA, fakeB), 1) fake_AB = paddle.concat((realA, fakeB), 1)
G_pred_fake = d(fake_AB.detach()) G_pred_fake = d(fake_AB.detach())
false_target = paddle.fill_constant(G_pred_fake.shape, 'float32', 0.0) false_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, 'float32', 0.0)
G_gradient_penalty, _ = self.cal_gradient_penalty( G_gradient_penalty, _ = self.cal_gradient_penalty(
d, realA, fakeB, lambda_gp=10.0) d, realA, fakeB, lambda_gp=10.0)
...@@ -125,7 +125,7 @@ class TestRetainGraph(unittest.TestCase): ...@@ -125,7 +125,7 @@ class TestRetainGraph(unittest.TestCase):
optim_g.clear_gradients() optim_g.clear_gradients()
fake_AB = paddle.concat((realA, fakeB), 1) fake_AB = paddle.concat((realA, fakeB), 1)
G_pred_fake = d(fake_AB) G_pred_fake = d(fake_AB)
true_target = paddle.fill_constant(G_pred_fake.shape, 'float32', 1.0) true_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, 'float32', 1.0)
loss_g = l1_criterion(fakeB, realB) + gan_criterion(G_pred_fake, loss_g = l1_criterion(fakeB, realB) + gan_criterion(G_pred_fake,
true_target) true_target)
......
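paddle.reduce_mean likewise reverts to the fluid layer here. A dygraph sketch with an illustrative input:

    import paddle
    paddle.disable_static()
    x = paddle.randn([4, 5])
    # was: m = paddle.reduce_mean(x)
    m = paddle.fluid.layers.reduce_mean(x)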
...@@ -69,7 +69,7 @@ class RowConvTestCase(unittest.TestCase): ...@@ -69,7 +69,7 @@ class RowConvTestCase(unittest.TestCase):
x = fluid.data( x = fluid.data(
"input", [-1, -1, self.num_channels], dtype=self.dtype) "input", [-1, -1, self.num_channels], dtype=self.dtype)
w = fluid.data("weight", self.weight_shape, dtype=self.dtype) w = fluid.data("weight", self.weight_shape, dtype=self.dtype)
y = F.row_conv(x, w, act=self.act) y = F.extension.row_conv(x, w, act=self.act)
exe = fluid.Executor(place) exe = fluid.Executor(place)
exe.run(start) exe.run(start)
y_np, = exe.run(main, y_np, = exe.run(main,
...@@ -82,7 +82,7 @@ class RowConvTestCase(unittest.TestCase): ...@@ -82,7 +82,7 @@ class RowConvTestCase(unittest.TestCase):
with dg.guard(place): with dg.guard(place):
x_var = dg.to_variable(self.input) x_var = dg.to_variable(self.input)
w_var = dg.to_variable(self.weight) w_var = dg.to_variable(self.weight)
y_var = F.row_conv(x_var, w_var, act=self.act) y_var = F.extension.row_conv(x_var, w_var, act=self.act)
y_np = y_var.numpy() y_np = y_var.numpy()
return y_np return y_np
......
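row_conv is no longer re-exported at the top of paddle.nn.functional and must be reached through the extension submodule, as the test now does. A dygraph sketch with illustrative shapes (weight rows = future context steps, columns = channels):

    import numpy as np
    import paddle.fluid.dygraph as dg
    import paddle.nn.functional as F
    with dg.guard():
        x = dg.to_variable(np.random.randn(2, 9, 4).astype('float32'))  # [batch, time, channels]
        w = dg.to_variable(np.random.randn(3, 4).astype('float32'))     # [context, channels]
        y = F.extension.row_conv(x, w)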
...@@ -93,7 +93,7 @@ class TestSeluAPI(unittest.TestCase): ...@@ -93,7 +93,7 @@ class TestSeluAPI(unittest.TestCase):
def test_static_api(self): def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype) x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.selu(x, self.scale, self.alpha) out1 = F.selu(x, self.scale, self.alpha)
selu = paddle.nn.SELU(self.scale, self.alpha) selu = paddle.nn.SELU(self.scale, self.alpha)
out2 = selu(x) out2 = selu(x)
...@@ -128,15 +128,15 @@ class TestSeluAPI(unittest.TestCase): ...@@ -128,15 +128,15 @@ class TestSeluAPI(unittest.TestCase):
# The input type must be Variable. # The input type must be Variable.
self.assertRaises(TypeError, F.selu, 1) self.assertRaises(TypeError, F.selu, 1)
# The input dtype must be float16, float32, float64. # The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.selu, x_int32) self.assertRaises(TypeError, F.selu, x_int32)
# The scale must be greater than 1.0 # The scale must be greater than 1.0
x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32') x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
self.assertRaises(ValueError, F.selu, x_fp32, -1.0) self.assertRaises(ValueError, F.selu, x_fp32, -1.0)
# The alpha must be no less than 0 # The alpha must be no less than 0
self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0) self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0)
# support the input dtype is float16 # support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.selu(x_fp16) F.selu(x_fp16)
......
...@@ -42,13 +42,13 @@ def test_static(place, ...@@ -42,13 +42,13 @@ def test_static(place,
prog = paddle.static.Program() prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog): with paddle.static.program_guard(prog, startup_prog):
logit = paddle.data(name='logit', shape=logit_np.shape, dtype='float64') logit = paddle.fluid.data(name='logit', shape=logit_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64') label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
feed_dict = {"logit": logit_np, "label": label_np} feed_dict = {"logit": logit_np, "label": label_np}
normalizer = None normalizer = None
if normalizer_np is not None: if normalizer_np is not None:
normalizer = paddle.data( normalizer = paddle.fluid.data(
name='normalizer', shape=normalizer_np.shape, dtype='float64') name='normalizer', shape=normalizer_np.shape, dtype='float64')
feed_dict["normalizer"] = normalizer_np feed_dict["normalizer"] = normalizer_np
......
...@@ -315,7 +315,7 @@ class TestSoftmaxAPI(unittest.TestCase): ...@@ -315,7 +315,7 @@ class TestSoftmaxAPI(unittest.TestCase):
def test_static_check(self): def test_static_check(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, 'float32') x = paddle.fluid.data('X', self.x_np.shape, 'float32')
out1 = F.softmax(x) out1 = F.softmax(x)
m = paddle.nn.Softmax() m = paddle.nn.Softmax()
out2 = m(x) out2 = m(x)
...@@ -354,10 +354,10 @@ class TestSoftmaxAPI(unittest.TestCase): ...@@ -354,10 +354,10 @@ class TestSoftmaxAPI(unittest.TestCase):
# The input type must be Variable. # The input type must be Variable.
self.assertRaises(TypeError, F.softmax, 1) self.assertRaises(TypeError, F.softmax, 1)
# The input dtype must be float16, float32, float64. # The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[2, 3], dtype='int32') x_int32 = paddle.fluid.data(name='x_int32', shape=[2, 3], dtype='int32')
self.assertRaises(TypeError, F.softmax, x_int32) self.assertRaises(TypeError, F.softmax, x_int32)
# support the input dtype is float16 # support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[2, 3], dtype='float16') x_fp16 = paddle.fluid.data(name='x_fp16', shape=[2, 3], dtype='float16')
F.softmax(x_fp16) F.softmax(x_fp16)
......
...@@ -44,7 +44,7 @@ class TestStdAPI(unittest.TestCase): ...@@ -44,7 +44,7 @@ class TestStdAPI(unittest.TestCase):
def static(self): def static(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.shape, self.dtype) x = paddle.fluid.data('X', self.shape, self.dtype)
out = paddle.std(x, self.axis, self.unbiased, self.keepdim) out = paddle.std(x, self.axis, self.unbiased, self.keepdim)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x}, fetch_list=[out]) res = exe.run(feed={'X': self.x}, fetch_list=[out])
...@@ -111,7 +111,7 @@ class TestStdAPI_alias(unittest.TestCase): ...@@ -111,7 +111,7 @@ class TestStdAPI_alias(unittest.TestCase):
class TestStdError(unittest.TestCase): class TestStdError(unittest.TestCase):
def test_error(self): def test_error(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [2, 3, 4], 'int32') x = paddle.fluid.data('X', [2, 3, 4], 'int32')
self.assertRaises(TypeError, paddle.std, x) self.assertRaises(TypeError, paddle.std, x)
......
...@@ -81,7 +81,7 @@ class TestTemporalShift3(TestTemporalShift): ...@@ -81,7 +81,7 @@ class TestTemporalShift3(TestTemporalShift):
class TestTemporalShiftAPI(unittest.TestCase): class TestTemporalShiftAPI(unittest.TestCase):
def test_api(self): def test_api(self):
input = paddle.randn([6, 4, 2, 2]) input = paddle.randn([6, 4, 2, 2])
out = paddle.nn.functional.temporal_shift( out = paddle.fluid.layers.temporal_shift(
x=input, seg_num=2, shift_ratio=0.2) x=input, seg_num=2, shift_ratio=0.2)
......
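The temporal_shift alias under paddle.nn.functional is removed, so the test calls the fluid layer directly. A dygraph sketch mirroring it (input is [N*T, C, H, W] with N*T divisible by seg_num):

    import paddle
    paddle.disable_static()
    x = paddle.randn([6, 4, 2, 2])  # N*T = 6, seg_num = 2
    out = paddle.fluid.layers.temporal_shift(x, seg_num=2, shift_ratio=0.2)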
...@@ -254,7 +254,7 @@ class TestUniqueAPI(unittest.TestCase): ...@@ -254,7 +254,7 @@ class TestUniqueAPI(unittest.TestCase):
def test_static_graph(self): def test_static_graph(self):
with paddle.static.program_guard(paddle.static.Program(), with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()): paddle.static.Program()):
x = paddle.data(name='x', shape=[3, 2], dtype='float64') x = paddle.fluid.data(name='x', shape=[3, 2], dtype='float64')
unique, inverse, counts = paddle.unique( unique, inverse, counts = paddle.unique(
x, return_inverse=True, return_counts=True, axis=0) x, return_inverse=True, return_counts=True, axis=0)
place = paddle.CPUPlace() place = paddle.CPUPlace()
...@@ -274,13 +274,13 @@ class TestUniqueError(unittest.TestCase): ...@@ -274,13 +274,13 @@ class TestUniqueError(unittest.TestCase):
def test_x_dtype(): def test_x_dtype():
with paddle.static.program_guard(paddle.static.Program(), with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()): paddle.static.Program()):
x = paddle.data(name='x', shape=[10, 10], dtype='float16') x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16')
result = paddle.unique(x) result = paddle.unique(x)
self.assertRaises(TypeError, test_x_dtype) self.assertRaises(TypeError, test_x_dtype)
def test_attr(self): def test_attr(self):
x = paddle.data(name='x', shape=[10, 10], dtype='float64') x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
def test_return_index(): def test_return_index():
result = paddle.unique(x, return_index=0) result = paddle.unique(x, return_index=0)
......
...@@ -44,7 +44,7 @@ class TestVarAPI(unittest.TestCase): ...@@ -44,7 +44,7 @@ class TestVarAPI(unittest.TestCase):
def static(self): def static(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.shape, self.dtype) x = paddle.fluid.data('X', self.shape, self.dtype)
out = paddle.var(x, self.axis, self.unbiased, self.keepdim) out = paddle.var(x, self.axis, self.unbiased, self.keepdim)
exe = paddle.static.Executor(self.place) exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x}, fetch_list=[out]) res = exe.run(feed={'X': self.x}, fetch_list=[out])
...@@ -111,7 +111,7 @@ class TestVarAPI_alias(unittest.TestCase): ...@@ -111,7 +111,7 @@ class TestVarAPI_alias(unittest.TestCase):
class TestVarError(unittest.TestCase): class TestVarError(unittest.TestCase):
def test_error(self): def test_error(self):
with paddle.static.program_guard(paddle.static.Program()): with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [2, 3, 4], 'int32') x = paddle.fluid.data('X', [2, 3, 4], 'int32')
self.assertRaises(TypeError, paddle.var, x) self.assertRaises(TypeError, paddle.var, x)
......
...@@ -25,7 +25,7 @@ from paddle.fluid import core, Program, program_guard ...@@ -25,7 +25,7 @@ from paddle.fluid import core, Program, program_guard
class TestZerosLikeAPIError(unittest.TestCase): class TestZerosLikeAPIError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x = paddle.data('x', [3, 4]) x = paddle.fluid.data('x', [3, 4])
self.assertRaises(TypeError, zeros_like, x, 'int8') self.assertRaises(TypeError, zeros_like, x, 'int8')
...@@ -35,7 +35,7 @@ class TestZerosLikeAPI(unittest.TestCase): ...@@ -35,7 +35,7 @@ class TestZerosLikeAPI(unittest.TestCase):
startup_program = Program() startup_program = Program()
train_program = Program() train_program = Program()
with program_guard(train_program, startup_program): with program_guard(train_program, startup_program):
x = paddle.data('X', shape) x = paddle.fluid.data('X', shape)
# 'bool', 'float32', 'float64', 'int32', 'int64' # 'bool', 'float32', 'float64', 'int32', 'int64'
out1 = zeros_like(x) out1 = zeros_like(x)
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
# TODO: import framework api under this directory # TODO: import framework api under this directory
__all__ = [ __all__ = [
'create_global_var', 'create_parameter', 'ParamAttr', 'Variable', 'create_parameter', 'ParamAttr',
'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace', 'get_default_dtype', 'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace', 'get_default_dtype',
'set_default_dtype' 'set_default_dtype'
] ]
...@@ -29,10 +29,9 @@ from .random import manual_seed ...@@ -29,10 +29,9 @@ from .random import manual_seed
from .framework import get_default_dtype from .framework import get_default_dtype
from .framework import set_default_dtype from .framework import set_default_dtype
from ..fluid.framework import Variable #DEFINE_ALIAS
from ..fluid.framework import ComplexVariable #DEFINE_ALIAS from ..fluid.framework import ComplexVariable #DEFINE_ALIAS
from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS
from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS # from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS
from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS
from ..fluid.core import CPUPlace #DEFINE_ALIAS from ..fluid.core import CPUPlace #DEFINE_ALIAS
from ..fluid.core import CUDAPlace #DEFINE_ALIAS from ..fluid.core import CUDAPlace #DEFINE_ALIAS
......
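Variable and create_global_var drop out of this module's exports. A sketch of the surviving spellings, assuming Variable stays exposed under paddle.static as in Paddle 2.x (values illustrative):

    import paddle
    paddle.enable_static()
    v = paddle.static.Variable  # the framework-level alias is gone
    g = paddle.fluid.layers.create_global_var(
        shape=[1], value=0.0, dtype='float32', persistable=True)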
...@@ -37,10 +37,10 @@ from .clip import ClipGradByValue #DEFINE_ALIAS ...@@ -37,10 +37,10 @@ from .clip import ClipGradByValue #DEFINE_ALIAS
# from .clip import set_gradient_clip #DEFINE_ALIAS # from .clip import set_gradient_clip #DEFINE_ALIAS
from .clip import clip #DEFINE_ALIAS from .clip import clip #DEFINE_ALIAS
from .clip import clip_by_norm #DEFINE_ALIAS from .clip import clip_by_norm #DEFINE_ALIAS
from .control_flow import cond #DEFINE_ALIAS # from .control_flow import cond #DEFINE_ALIAS
# from .control_flow import DynamicRNN #DEFINE_ALIAS # from .control_flow import DynamicRNN #DEFINE_ALIAS
# from .control_flow import StaticRNN #DEFINE_ALIAS # from .control_flow import StaticRNN #DEFINE_ALIAS
from .control_flow import while_loop #DEFINE_ALIAS # from .control_flow import while_loop #DEFINE_ALIAS
# from .control_flow import rnn #DEFINE_ALIAS # from .control_flow import rnn #DEFINE_ALIAS
# from .decode import BeamSearchDecoder #DEFINE_ALIAS # from .decode import BeamSearchDecoder #DEFINE_ALIAS
# from .decode import Decoder #DEFINE_ALIAS # from .decode import Decoder #DEFINE_ALIAS
...@@ -49,7 +49,7 @@ from .control_flow import while_loop #DEFINE_ALIAS ...@@ -49,7 +49,7 @@ from .control_flow import while_loop #DEFINE_ALIAS
# from .decode import crf_decoding #DEFINE_ALIAS # from .decode import crf_decoding #DEFINE_ALIAS
# from .decode import ctc_greedy_decoder #DEFINE_ALIAS # from .decode import ctc_greedy_decoder #DEFINE_ALIAS
# from .decode import dynamic_decode #DEFINE_ALIAS # from .decode import dynamic_decode #DEFINE_ALIAS
from .decode import gather_tree #DEFINE_ALIAS # from .decode import gather_tree #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS # from .input import Input #DEFINE_ALIAS
from .layer.activation import ELU #DEFINE_ALIAS from .layer.activation import ELU #DEFINE_ALIAS
from .layer.activation import GELU #DEFINE_ALIAS from .layer.activation import GELU #DEFINE_ALIAS
...@@ -74,9 +74,6 @@ from .layer.activation import Tanhshrink #DEFINE_ALIAS ...@@ -74,9 +74,6 @@ from .layer.activation import Tanhshrink #DEFINE_ALIAS
from .layer.activation import ThresholdedReLU #DEFINE_ALIAS from .layer.activation import ThresholdedReLU #DEFINE_ALIAS
from .layer.activation import LogSoftmax #DEFINE_ALIAS from .layer.activation import LogSoftmax #DEFINE_ALIAS
from .layer.activation import Maxout #DEFINE_ALIAS from .layer.activation import Maxout #DEFINE_ALIAS
from .layer.common import BilinearTensorProduct #DEFINE_ALIAS
from .layer.common import Pool2D #DEFINE_ALIAS
from .layer.common import Pad2D #DEFINE_ALIAS
from .layer.common import ReflectionPad1d #DEFINE_ALIAS from .layer.common import ReflectionPad1d #DEFINE_ALIAS
from .layer.common import ReplicationPad1d #DEFINE_ALIAS from .layer.common import ReplicationPad1d #DEFINE_ALIAS
from .layer.common import ConstantPad1d #DEFINE_ALIAS from .layer.common import ConstantPad1d #DEFINE_ALIAS
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the control flow api
from ..fluid.layers import cond #DEFINE_ALIAS
from ..fluid.layers import while_loop #DEFINE_ALIAS
__all__ = [
'cond',
# 'DynamicRNN',
# 'StaticRNN',
'while_loop',
# 'rnn'
]
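This module keeps cond and while_loop as the surviving aliases of the fluid control-flow layers. A minimal static-mode sketch of cond (constants illustrative):

    import paddle
    paddle.enable_static()
    a = paddle.fluid.layers.fill_constant([1], 'float32', 0.1)
    b = paddle.fluid.layers.fill_constant([1], 'float32', 0.2)
    pred = paddle.fluid.layers.less_than(a, b)
    out = paddle.fluid.layers.cond(pred, lambda: a + b, lambda: a - b)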
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define api to implement decoding algorithm
# from ..fluid.layers import beam_search #DEFINE_ALIAS
# from ..fluid.layers import beam_search_decode #DEFINE_ALIAS
from ..fluid.layers import gather_tree #DEFINE_ALIAS
__all__ = [
# 'BeamSearchDecoder',
# 'Decoder',
# 'beam_search',
# 'beam_search_decode',
# 'crf_decoding',
# 'ctc_greedy_decoder',
# 'dynamic_decode',
'gather_tree'
]
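gather_tree is the only decode alias retained; it back-traces beam-search parent pointers into complete sequences, with both inputs shaped [max_time, batch_size, beam_size]. A dygraph sketch (data illustrative):

    import numpy as np
    import paddle
    paddle.disable_static()
    ids = paddle.to_tensor(np.array(
        [[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]], dtype='int64'))
    parents = paddle.to_tensor(np.array(
        [[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]], dtype='int64'))
    final = paddle.fluid.layers.gather_tree(ids, parents)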
...@@ -30,7 +30,7 @@ __all__ += pooling.__all__ ...@@ -30,7 +30,7 @@ __all__ += pooling.__all__
from . import loss from . import loss
__all__ += loss.__all__ __all__ += loss.__all__
from .activation import elu #DEFINE_ALIAS from .activation import elu #DEFINE_ALIAS
from .activation import erf #DEFINE_ALIAS # from .activation import erf #DEFINE_ALIAS
from .activation import gelu #DEFINE_ALIAS from .activation import gelu #DEFINE_ALIAS
from .activation import hardshrink #DEFINE_ALIAS from .activation import hardshrink #DEFINE_ALIAS
from .activation import hardtanh #DEFINE_ALIAS from .activation import hardtanh #DEFINE_ALIAS
...@@ -44,7 +44,7 @@ from .activation import relu #DEFINE_ALIAS ...@@ -44,7 +44,7 @@ from .activation import relu #DEFINE_ALIAS
from .activation import relu6 #DEFINE_ALIAS from .activation import relu6 #DEFINE_ALIAS
from .activation import selu #DEFINE_ALIAS from .activation import selu #DEFINE_ALIAS
from .activation import sigmoid #DEFINE_ALIAS from .activation import sigmoid #DEFINE_ALIAS
from .activation import soft_relu #DEFINE_ALIAS # from .activation import soft_relu #DEFINE_ALIAS
from .activation import softmax #DEFINE_ALIAS from .activation import softmax #DEFINE_ALIAS
from .activation import softplus #DEFINE_ALIAS from .activation import softplus #DEFINE_ALIAS
from .activation import softshrink #DEFINE_ALIAS from .activation import softshrink #DEFINE_ALIAS
...@@ -61,10 +61,10 @@ from .common import alpha_dropout #DEFINE_ALIAS ...@@ -61,10 +61,10 @@ from .common import alpha_dropout #DEFINE_ALIAS
# from .common import embedding #DEFINE_ALIAS # from .common import embedding #DEFINE_ALIAS
# from .common import fc #DEFINE_ALIAS # from .common import fc #DEFINE_ALIAS
from .common import label_smooth from .common import label_smooth
from .common import one_hot #DEFINE_ALIAS # from .common import one_hot #DEFINE_ALIAS
from .common import pad #DEFINE_ALIAS from .common import pad #DEFINE_ALIAS
from .common import pad_constant_like #DEFINE_ALIAS # from .common import pad_constant_like #DEFINE_ALIAS
from .common import pad2d #DEFINE_ALIAS # from .common import pad2d #DEFINE_ALIAS
from .common import cosine_similarity #DEFINE_ALIAS from .common import cosine_similarity #DEFINE_ALIAS
from .common import unfold #DEFINE_ALIAS from .common import unfold #DEFINE_ALIAS
# from .common import bilinear_tensor_product #DEFINE_ALIAS # from .common import bilinear_tensor_product #DEFINE_ALIAS
...@@ -79,21 +79,21 @@ from .conv import conv2d #DEFINE_ALIAS ...@@ -79,21 +79,21 @@ from .conv import conv2d #DEFINE_ALIAS
from .conv import conv_transpose2d #DEFINE_ALIAS from .conv import conv_transpose2d #DEFINE_ALIAS
from .conv import conv3d #DEFINE_ALIAS from .conv import conv3d #DEFINE_ALIAS
from .conv import conv_transpose3d #DEFINE_ALIAS from .conv import conv_transpose3d #DEFINE_ALIAS
from .extension import add_position_encoding #DEFINE_ALIAS # from .extension import add_position_encoding #DEFINE_ALIAS
# from .extension import autoincreased_step_counter #DEFINE_ALIAS # from .extension import autoincreased_step_counter #DEFINE_ALIAS
from .extension import continuous_value_model #DEFINE_ALIAS # from .extension import continuous_value_model #DEFINE_ALIAS
from .extension import filter_by_instag #DEFINE_ALIAS # from .extension import filter_by_instag #DEFINE_ALIAS
# from .extension import linear_chain_crf #DEFINE_ALIAS # from .extension import linear_chain_crf #DEFINE_ALIAS
# from .extension import merge_selected_rows #DEFINE_ALIAS # from .extension import merge_selected_rows #DEFINE_ALIAS
from .extension import multiclass_nms #DEFINE_ALIAS # from .extension import multiclass_nms #DEFINE_ALIAS
from .extension import polygon_box_transform #DEFINE_ALIAS # from .extension import polygon_box_transform #DEFINE_ALIAS
from .extension import random_crop #DEFINE_ALIAS # from .extension import random_crop #DEFINE_ALIAS
from .extension import row_conv #DEFINE_ALIAS # from .extension import row_conv #DEFINE_ALIAS
from .extension import rpn_target_assign #DEFINE_ALIAS # from .extension import rpn_target_assign #DEFINE_ALIAS
from .extension import similarity_focus #DEFINE_ALIAS # from .extension import similarity_focus #DEFINE_ALIAS
from .extension import target_assign #DEFINE_ALIAS # from .extension import target_assign #DEFINE_ALIAS
from .extension import temporal_shift #DEFINE_ALIAS # from .extension import temporal_shift #DEFINE_ALIAS
from .extension import warpctc #DEFINE_ALIAS # from .extension import warpctc #DEFINE_ALIAS
from .extension import diag_embed #DEFINE_ALIAS from .extension import diag_embed #DEFINE_ALIAS
# from .lod import sequence_concat #DEFINE_ALIAS # from .lod import sequence_concat #DEFINE_ALIAS
# from .lod import sequence_conv #DEFINE_ALIAS # from .lod import sequence_conv #DEFINE_ALIAS
...@@ -115,7 +115,7 @@ from .extension import diag_embed #DEFINE_ALIAS ...@@ -115,7 +115,7 @@ from .extension import diag_embed #DEFINE_ALIAS
# from .lod import array_read #DEFINE_ALIAS # from .lod import array_read #DEFINE_ALIAS
# from .lod import array_write #DEFINE_ALIAS # from .lod import array_write #DEFINE_ALIAS
# from .lod import create_array #DEFINE_ALIAS # from .lod import create_array #DEFINE_ALIAS
from .lod import hash #DEFINE_ALIAS # from .lod import hash #DEFINE_ALIAS
# from .lod import im2sequence #DEFINE_ALIAS # from .lod import im2sequence #DEFINE_ALIAS
# from .lod import lod_append #DEFINE_ALIAS # from .lod import lod_append #DEFINE_ALIAS
# from .lod import lod_reset #DEFINE_ALIAS # from .lod import lod_reset #DEFINE_ALIAS
...@@ -126,11 +126,10 @@ from .lod import hash #DEFINE_ALIAS ...@@ -126,11 +126,10 @@ from .lod import hash #DEFINE_ALIAS
# from .lod import dynamic_lstmp #DEFINE_ALIAS # from .lod import dynamic_lstmp #DEFINE_ALIAS
from .loss import binary_cross_entropy #DEFINE_ALIAS from .loss import binary_cross_entropy #DEFINE_ALIAS
from .loss import binary_cross_entropy_with_logits #DEFINE_ALIAS from .loss import binary_cross_entropy_with_logits #DEFINE_ALIAS
from .loss import bpr_loss #DEFINE_ALIAS # from .loss import bpr_loss #DEFINE_ALIAS
from .loss import center_loss #DEFINE_ALIAS # from .loss import center_loss #DEFINE_ALIAS
from .loss import cross_entropy #DEFINE_ALIAS from .loss import cross_entropy #DEFINE_ALIAS
from .loss import dice_loss #DEFINE_ALIAS from .loss import dice_loss #DEFINE_ALIAS
from .loss import edit_distance #DEFINE_ALIAS
from .loss import hsigmoid_loss #DEFINE_ALIAS from .loss import hsigmoid_loss #DEFINE_ALIAS
from .loss import iou_similarity #DEFINE_ALIAS from .loss import iou_similarity #DEFINE_ALIAS
from .loss import kl_div #DEFINE_ALIAS from .loss import kl_div #DEFINE_ALIAS
...@@ -141,15 +140,13 @@ from .loss import mse_loss #DEFINE_ALIAS ...@@ -141,15 +140,13 @@ from .loss import mse_loss #DEFINE_ALIAS
from .loss import nll_loss #DEFINE_ALIAS from .loss import nll_loss #DEFINE_ALIAS
# from .loss import nce #DEFINE_ALIAS # from .loss import nce #DEFINE_ALIAS
from .loss import npair_loss #DEFINE_ALIAS from .loss import npair_loss #DEFINE_ALIAS
from .loss import rank_loss #DEFINE_ALIAS
from .loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
from .loss import sigmoid_focal_loss #DEFINE_ALIAS from .loss import sigmoid_focal_loss #DEFINE_ALIAS
from .loss import smooth_l1 #DEFINE_ALIAS # from .loss import smooth_l1 #DEFINE_ALIAS
from .loss import smooth_l1_loss #DEFINE_ALIAS from .loss import smooth_l1_loss #DEFINE_ALIAS
from .loss import softmax_with_cross_entropy #DEFINE_ALIAS from .loss import softmax_with_cross_entropy #DEFINE_ALIAS
from .loss import square_error_cost #DEFINE_ALIAS from .loss import square_error_cost #DEFINE_ALIAS
from .loss import ssd_loss #DEFINE_ALIAS from .loss import ssd_loss #DEFINE_ALIAS
from .loss import teacher_student_sigmoid_loss #DEFINE_ALIAS # from .loss import teacher_student_sigmoid_loss #DEFINE_ALIAS
from .loss import ctc_loss #DEFINE_ALIAS from .loss import ctc_loss #DEFINE_ALIAS
# from .norm import data_norm #DEFINE_ALIAS # from .norm import data_norm #DEFINE_ALIAS
# from .norm import group_norm #DEFINE_ALIAS # from .norm import group_norm #DEFINE_ALIAS
...@@ -159,8 +156,8 @@ from .norm import layer_norm #DEFINE_ALIAS ...@@ -159,8 +156,8 @@ from .norm import layer_norm #DEFINE_ALIAS
from .norm import local_response_norm #DEFINE_ALIAS from .norm import local_response_norm #DEFINE_ALIAS
from .norm import normalize #DEFINE_ALIAS from .norm import normalize #DEFINE_ALIAS
# from .norm import spectral_norm #DEFINE_ALIAS # from .norm import spectral_norm #DEFINE_ALIAS
from .pooling import pool2d #DEFINE_ALIAS # from .pooling import pool2d #DEFINE_ALIAS
from .pooling import pool3d #DEFINE_ALIAS # from .pooling import pool3d #DEFINE_ALIAS
from .pooling import avg_pool1d #DEFINE_ALIAS from .pooling import avg_pool1d #DEFINE_ALIAS
from .pooling import avg_pool2d #DEFINE_ALIAS from .pooling import avg_pool2d #DEFINE_ALIAS
from .pooling import avg_pool3d #DEFINE_ALIAS from .pooling import avg_pool3d #DEFINE_ALIAS
...@@ -175,43 +172,47 @@ from .pooling import adaptive_avg_pool1d #DEFINE_ALIAS ...@@ -175,43 +172,47 @@ from .pooling import adaptive_avg_pool1d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool2d #DEFINE_ALIAS from .pooling import adaptive_avg_pool2d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool3d #DEFINE_ALIAS from .pooling import adaptive_avg_pool3d #DEFINE_ALIAS
from .rnn import rnn #DEFINE_ALIAS # from .rnn import rnn #DEFINE_ALIAS
from .rnn import birnn #DEFINE_ALIAS # from .rnn import birnn #DEFINE_ALIAS
# from .rnn import gru_unit #DEFINE_ALIAS # from .rnn import gru_unit #DEFINE_ALIAS
# from .rnn import lstm #DEFINE_ALIAS # from .rnn import lstm #DEFINE_ALIAS
# from .rnn import lstm_unit #DEFINE_ALIAS # from .rnn import lstm_unit #DEFINE_ALIAS
from .vision import affine_channel #DEFINE_ALIAS # from .vision import affine_channel #DEFINE_ALIAS
from .vision import affine_grid #DEFINE_ALIAS from .vision import affine_grid #DEFINE_ALIAS
from .vision import anchor_generator #DEFINE_ALIAS # from .vision import anchor_generator #DEFINE_ALIAS
from .vision import bipartite_match #DEFINE_ALIAS # from .vision import bipartite_match #DEFINE_ALIAS
from .vision import box_clip #DEFINE_ALIAS # from .vision import box_clip #DEFINE_ALIAS
from .vision import box_coder #DEFINE_ALIAS # from .vision import box_coder #DEFINE_ALIAS
from .vision import box_decoder_and_assign #DEFINE_ALIAS # from .vision import box_decoder_and_assign #DEFINE_ALIAS
from .vision import collect_fpn_proposals #DEFINE_ALIAS # from .vision import collect_fpn_proposals #DEFINE_ALIAS
# from .vision import deformable_conv #DEFINE_ALIAS # from .vision import deformable_conv #DEFINE_ALIAS
from .vision import deformable_roi_pooling #DEFINE_ALIAS # from .vision import deformable_roi_pooling #DEFINE_ALIAS
from .vision import density_prior_box #DEFINE_ALIAS # from .vision import density_prior_box #DEFINE_ALIAS
from .vision import detection_output #DEFINE_ALIAS # from .vision import detection_output #DEFINE_ALIAS
from .vision import distribute_fpn_proposals #DEFINE_ALIAS # from .vision import distribute_fpn_proposals #DEFINE_ALIAS
from .vision import fsp_matrix #DEFINE_ALIAS # from .vision import fsp_matrix #DEFINE_ALIAS
from .vision import generate_mask_labels #DEFINE_ALIAS # from .vision import generate_mask_labels #DEFINE_ALIAS
from .vision import generate_proposal_labels #DEFINE_ALIAS # from .vision import generate_proposal_labels #DEFINE_ALIAS
from .vision import generate_proposals #DEFINE_ALIAS # from .vision import generate_proposals #DEFINE_ALIAS
from .vision import grid_sample #DEFINE_ALIAS from .vision import grid_sample #DEFINE_ALIAS
from .vision import image_resize_short #DEFINE_ALIAS # from .vision import image_resize #DEFINE_ALIAS
# from .vision import image_resize_short #DEFINE_ALIAS
# from .vision import multi_box_head #DEFINE_ALIAS # from .vision import multi_box_head #DEFINE_ALIAS
from .vision import pixel_shuffle #DEFINE_ALIAS from .vision import pixel_shuffle #DEFINE_ALIAS
from .vision import prior_box #DEFINE_ALIAS # from .vision import prior_box #DEFINE_ALIAS
from .vision import prroi_pool #DEFINE_ALIAS # from .vision import prroi_pool #DEFINE_ALIAS
from .vision import psroi_pool #DEFINE_ALIAS # from .vision import psroi_pool #DEFINE_ALIAS
from .vision import retinanet_detection_output #DEFINE_ALIAS # from .vision import resize_bilinear #DEFINE_ALIAS
from .vision import retinanet_target_assign #DEFINE_ALIAS # from .vision import resize_nearest #DEFINE_ALIAS
from .vision import roi_align #DEFINE_ALIAS # from .vision import resize_trilinear #DEFINE_ALIAS
from .vision import roi_perspective_transform #DEFINE_ALIAS # from .vision import retinanet_detection_output #DEFINE_ALIAS
from .vision import roi_pool #DEFINE_ALIAS # from .vision import retinanet_target_assign #DEFINE_ALIAS
from .vision import shuffle_channel #DEFINE_ALIAS # from .vision import roi_align #DEFINE_ALIAS
from .vision import space_to_depth #DEFINE_ALIAS # from .vision import roi_perspective_transform #DEFINE_ALIAS
from .vision import yolo_box #DEFINE_ALIAS # from .vision import roi_pool #DEFINE_ALIAS
from .vision import yolov3_loss #DEFINE_ALIAS # from .vision import shuffle_channel #DEFINE_ALIAS
# from .vision import space_to_depth #DEFINE_ALIAS
# from .vision import yolo_box #DEFINE_ALIAS
# from .vision import yolov3_loss #DEFINE_ALIAS
from .input import one_hot #DEFINE_ALIAS from .input import one_hot #DEFINE_ALIAS
from .input import embedding #DEFINE_ALIAS from .input import embedding #DEFINE_ALIAS
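Note the tail of this hunk: one_hot and embedding are now sourced from .input rather than .common, while the public call is unchanged. A dygraph sketch (labels illustrative):

    import paddle
    paddle.disable_static()
    label = paddle.to_tensor([1, 1, 3, 0], dtype='int64')
    one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=4)  # shape [4, 4]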
...@@ -13,14 +13,18 @@ ...@@ -13,14 +13,18 @@
# limitations under the License. # limitations under the License.
# TODO: define activation functions of neural network # TODO: define activation functions of neural network
from ...fluid.layers import erf #DEFINE_ALIAS from ...fluid.layers import brelu #DEFINE_ALIAS
from ...fluid.layers import soft_relu #DEFINE_ALIAS # from ...fluid.layers import erf #DEFINE_ALIAS
from ...fluid.layers import hard_sigmoid #DEFINE_ALIAS
from ...fluid.layers import hard_swish #DEFINE_ALIAS
from ...fluid.layers import maxout #DEFINE_ALIAS
# from ...fluid.layers import soft_relu #DEFINE_ALIAS
from ...fluid.layers import swish #DEFINE_ALIAS
from ...fluid.layers import sigmoid #DEFINE_ALIAS from ...fluid.layers import sigmoid #DEFINE_ALIAS
from ...tensor.math import tanh #DEFINE_ALIAS from ...tensor.math import tanh #DEFINE_ALIAS
__all__ = [ __all__ = [
'elu', 'elu',
'erf',
'gelu', 'gelu',
'hardshrink', 'hardshrink',
'hardtanh', 'hardtanh',
...@@ -33,7 +37,6 @@ __all__ = [ ...@@ -33,7 +37,6 @@ __all__ = [
'relu', 'relu',
'relu6', 'relu6',
'selu', 'selu',
'soft_relu',
'softmax', 'softmax',
'softplus', 'softplus',
'softshrink', 'softshrink',
......
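erf and soft_relu leave the functional namespace while brelu, hard_sigmoid, hard_swish, maxout and swish are pulled in. A dygraph sketch of the fallback spellings for the removed pair, assuming the fluid layers remain callable (input illustrative):

    import paddle
    paddle.disable_static()
    x = paddle.randn([3])
    y1 = paddle.erf(x)                     # was: paddle.nn.functional.erf(x)
    y2 = paddle.fluid.layers.soft_relu(x)  # was: paddle.nn.functional.soft_relu(x)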
...@@ -20,13 +20,12 @@ from paddle.fluid.layers.tensor import Variable, fill_constant, zeros, concat ...@@ -20,13 +20,12 @@ from paddle.fluid.layers.tensor import Variable, fill_constant, zeros, concat
from ...fluid.layers import core from ...fluid.layers import core
from ...fluid import dygraph_utils from ...fluid import dygraph_utils
# TODO: define the common functions to build a neural network # TODO: define the common functions to build a neural network
from ...fluid import one_hot #DEFINE_ALIAS # from ...fluid import one_hot #DEFINE_ALIAS
from ...fluid.layers import pad2d #DEFINE_ALIAS # from ...fluid.layers import pad2d #DEFINE_ALIAS
from ...fluid.layers import unfold #DEFINE_ALIAS from ...fluid.layers import unfold #DEFINE_ALIAS
from ...fluid.layers import assign #DEFINE_ALIAS from ...fluid.layers import assign #DEFINE_ALIAS
from ...fluid.layers import squeeze #DEFINE_ALIAS from ...fluid.layers import squeeze #DEFINE_ALIAS
from ...fluid.layers import unsqueeze #DEFINE_ALIAS from ...fluid.layers import unsqueeze #DEFINE_ALIAS
from ...fluid.layers import elementwise_mul #DEFINE_ALIAS
from ...tensor import clip from ...tensor import clip
from ...tensor import sum from ...tensor import sum
from ...tensor import sqrt from ...tensor import sqrt
...@@ -36,7 +35,7 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_dtype ...@@ -36,7 +35,7 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from ...fluid.framework import Variable, in_dygraph_mode, _varbase_creator from ...fluid.framework import Variable, in_dygraph_mode, _varbase_creator
#from ...fluid.layers import fc #DEFINE_ALIAS #from ...fluid.layers import fc #DEFINE_ALIAS
from ...fluid.layers import pad_constant_like #DEFINE_ALIAS # from ...fluid.layers import pad_constant_like #DEFINE_ALIAS
from ...fluid.framework import in_dygraph_mode from ...fluid.framework import in_dygraph_mode
from ...fluid import core, dygraph_utils from ...fluid import core, dygraph_utils
from ...fluid import core, layers from ...fluid import core, layers
...@@ -51,10 +50,7 @@ __all__ = [ ...@@ -51,10 +50,7 @@ __all__ = [
# 'fc', # 'fc',
'label_smooth', 'label_smooth',
'linear', 'linear',
'one_hot',
'pad', 'pad',
'pad_constant_like',
'pad2d',
'unfold', 'unfold',
# 'bilinear_tensor_product', # 'bilinear_tensor_product',
'assign', 'assign',
...@@ -1395,9 +1391,9 @@ def cosine_similarity(x1, x2, axis=1, eps=1e-8): ...@@ -1395,9 +1391,9 @@ def cosine_similarity(x1, x2, axis=1, eps=1e-8):
# [0.99806249 0.9817672 0.94987036] # [0.99806249 0.9817672 0.94987036]
""" """
w12 = sum(elementwise_mul(x1, x2), axis=axis) w12 = sum(paddle.multiply(x1, x2), axis=axis)
w1 = sum(elementwise_mul(x1, x1), axis=axis) w1 = sum(paddle.multiply(x1, x1), axis=axis)
w2 = sum(elementwise_mul(x2, x2), axis=axis) w2 = sum(paddle.multiply(x2, x2), axis=axis)
n12 = sqrt(clip(w1 * w2, min=eps * eps)) n12 = sqrt(clip(w1 * w2, min=eps * eps))
cos_sim = w12 / n12 cos_sim = w12 / n12
return cos_sim return cos_sim
......
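cosine_similarity now composes the public paddle.multiply in place of the removed elementwise_mul alias; the result is still w12 / sqrt(clip(w1 * w2, eps^2)). A dygraph sketch of the replaced reduction (tensors illustrative):

    import paddle
    paddle.disable_static()
    x1, x2 = paddle.randn([2, 3]), paddle.randn([2, 3])
    w12 = paddle.sum(paddle.multiply(x1, x2), axis=1)  # was: sum(elementwise_mul(x1, x2), axis=1)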
...@@ -13,36 +13,10 @@ ...@@ -13,36 +13,10 @@
# limitations under the License. # limitations under the License.
# TODO: define the extension functions # TODO: define the extension functions
from ...fluid.layers import add_position_encoding #DEFINE_ALIAS
from ...fluid.layers import multiclass_nms #DEFINE_ALIAS
from ...fluid.layers import target_assign #DEFINE_ALIAS
from ...fluid.layers import temporal_shift #DEFINE_ALIAS
from ...fluid.layers import continuous_value_model #DEFINE_ALIAS
from ...fluid.layers import filter_by_instag #DEFINE_ALIAS
from ...fluid.layers import polygon_box_transform #DEFINE_ALIAS
from ...fluid.layers import random_crop #DEFINE_ALIAS
from ...fluid.layers import rpn_target_assign #DEFINE_ALIAS
from ...fluid.layers import similarity_focus #DEFINE_ALIAS
from ...fluid.layers import warpctc #DEFINE_ALIAS
__all__ = [ __all__ = [
'add_position_encoding', 'diag_embed',
# 'autoincreased_step_counter', 'row_conv'
'continuous_value_model',
'filter_by_instag',
# 'linear_chain_crf',
# 'merge_selected_rows',
'multiclass_nms',
'polygon_box_transform',
'random_crop',
'row_conv',
'rpn_target_assign',
'similarity_focus',
'target_assign',
'temporal_shift',
'warpctc',
'diag_embed'
] ]
import numpy as np import numpy as np
...@@ -176,8 +150,6 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1): ...@@ -176,8 +150,6 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
@templatedoc() @templatedoc()
def row_conv(input, weight, act=None): def row_conv(input, weight, act=None):
""" """
:alias_main: paddle.nn.functional.row_conv
:alias: paddle.nn.functional.row_conv,paddle.nn.functional.extension.row_conv
${comment} ${comment}
@@ -217,7 +189,7 @@ def row_conv(input, weight, act=None):
        with dg.guard(place):
            x_var = dg.to_variable(x)
            w_var = dg.to_variable(weight)
-           y_var = F.row_conv(x_var, w_var)
+           y_var = F.extension.row_conv(x_var, w_var)
            y_np = y_var.numpy()
        print(y_np.shape)
...
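After the trim, this module publicly exposes only `diag_embed` and `row_conv` (the latter now reached as `F.extension.row_conv`, per the docstring fix above). A hedged sketch of `diag_embed` (input values illustrative):

import numpy as np
import paddle
import paddle.nn.functional as F

paddle.disable_static()
data = paddle.to_tensor(np.random.randn(2, 3).astype('float32'))
out = F.diag_embed(data)  # last dim becomes the diagonal of a new [3, 3] matrix per row
print(out.shape)          # [2, 3, 3]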
@@ -74,7 +74,7 @@ def one_hot(x, num_classes, name=None):
        import paddle
        # Correspond to the first example above, where label.shape is 4 and one_hot_label.shape is [4, 4].
-       label = paddle.data(name="label", shape=[4, 1], dtype="int64")
+       label = paddle.fluid.data(name="label", shape=[4, 1], dtype="int64")
        # label.shape = [4]
        # label.data = [1, 1, 3, 0]
        one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=4)
@@ -183,7 +183,7 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):
        weight = prog.global_block().create_parameter(
            (128, 100), dtype="float32", default_initializer=Constant(1.0))
-       label = paddle.data(
+       label = paddle.fluid.data(
            name="label",
            shape=[4],
            append_batch_size=False,
...
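Both docstring fixes above replace the removed `paddle.data` alias with `paddle.fluid.data`. A minimal runnable sketch of the `one_hot` pattern under static graph (shape simplified to [4] for clarity; values illustrative):

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()
label = paddle.fluid.data(name="label", shape=[4], dtype="int64")
one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=4)

exe = fluid.Executor(fluid.CPUPlace())
out, = exe.run(feed={"label": np.array([1, 1, 3, 0], dtype="int64")},
               fetch_list=[one_hot_label])
print(out)  # one row per label, a single 1.0 at the label's index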
- # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # TODO: define functions which accept only LoDTensor as input
- from ...fluid.layers import hash #DEFINE_ALIAS
- __all__ = [
-     # 'sequence_concat',
-     # 'sequence_conv',
-     # 'sequence_enumerate',
-     # 'sequence_expand_as',
-     # 'sequence_expand',
-     # 'sequence_first_step',
-     # 'sequence_last_step',
-     # 'sequence_mask',
-     # 'sequence_pad',
-     # 'sequence_pool',
-     # 'sequence_reshape',
-     # 'sequence_reverse',
-     # 'sequence_scatter',
-     # 'sequence_slice',
-     # 'sequence_softmax',
-     # 'sequence_unpad',
-     # 'array_length',
-     # 'array_read',
-     # 'array_write',
-     # 'create_array',
-     'hash',
-     # 'im2sequence',
-     # 'lod_append',
-     # 'lod_reset',
-     # 'reorder_lod_tensor_by_rank',
-     # 'tensor_array_to_tensor',
-     # 'dynamic_gru',
-     # 'dynamic_lstm',
-     # 'dynamic_lstmp'
- ]
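The alias module above is deleted outright; the op itself stays reachable as `paddle.fluid.layers.hash`. A hedged graph-building sketch (the `hash_size`/`num_hash` values and the int32 LoD input layout are illustrative assumptions, not from the patch):

import paddle
import paddle.fluid as fluid

paddle.enable_static()
# hash expects an integer LoDTensor; hash_size bounds the hashed ids
ids = fluid.data(name="ids", shape=[None, 1], dtype="int32", lod_level=1)
hashed = fluid.layers.hash(ids, hash_size=10000, num_hash=4)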
@@ -23,19 +23,14 @@ import paddle
import paddle.fluid as fluid
from ...fluid.framework import core, in_dygraph_mode
from ...fluid.layers.nn import _elementwise_op_in_dygraph
- from ...fluid.layers import bpr_loss #DEFINE_ALIAS
- from ...fluid.layers import center_loss #DEFINE_ALIAS
from ...fluid.layers import dice_loss #DEFINE_ALIAS
from ...fluid.layers import iou_similarity #DEFINE_ALIAS
from ...fluid.layers import log_loss #DEFINE_ALIAS
from ...fluid.layers import npair_loss #DEFINE_ALIAS
- from ...fluid.layers import rank_loss #DEFINE_ALIAS
from ...fluid.layers import reshape
- from ...fluid.layers import smooth_l1 #DEFINE_ALIAS
from ...fluid.layers import softmax_with_cross_entropy #DEFINE_ALIAS
from ...fluid.layers import square_error_cost #DEFINE_ALIAS
from ...fluid.layers import ssd_loss #DEFINE_ALIAS
- from ...fluid.layers import teacher_student_sigmoid_loss #DEFINE_ALIAS
from ...fluid.layers import edit_distance #DEFINE_ALIAS
from ...fluid.layers import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
@@ -48,11 +43,8 @@ from ...fluid.framework import Variable
__all__ = [
    'binary_cross_entropy',
    'binary_cross_entropy_with_logits',
-   'bpr_loss',
-   'center_loss',
    'cross_entropy',
    'dice_loss',
-   'edit_distance',
    'hsigmoid_loss',
    'iou_similarity',
    'kl_div',
@@ -63,15 +55,11 @@ __all__ = [
    # 'nce',
    'nll_loss',
    'npair_loss',
-   'rank_loss',
-   'sampled_softmax_with_cross_entropy',
    'sigmoid_focal_loss',
-   'smooth_l1',
    'smooth_l1_loss',
    'softmax_with_cross_entropy',
    'square_error_cost',
    'ssd_loss',
-   'teacher_student_sigmoid_loss',
    'ctc_loss',
]
@@ -179,7 +167,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
            outputs={'Out': [out]})
    if weight is not None:
-       if isinstance(weight, paddle.framework.Variable):
+       if isinstance(weight, paddle.static.Variable):
            weight_name = name if reduction is 'none' else None
            out = paddle.multiply(out, weight, axis=-1, name=weight_name)
        else:
@@ -317,13 +305,13 @@ def binary_cross_entropy_with_logits(logit,
        out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(
            logit, label, name=sigmoid_name)
-   one = paddle.fill_constant(shape=[1], value=1.0, dtype=logit.dtype)
+   one = paddle.fluid.layers.fill_constant(shape=[1], value=1.0, dtype=logit.dtype)
    if pos_weight is not None:
        fluid.data_feeder.check_variable_and_dtype(
            pos_weight, 'pos_weight', ['float32', 'float64'],
            'binary_cross_entropy_with_logits')
        log_weight = paddle.add(
-           paddle.multiply(label, paddle.elementwise_sub(pos_weight, one)),
+           paddle.multiply(label, paddle.fluid.layers.elementwise_sub(pos_weight, one)),
            one)
        pos_weight_name = name if reduction == 'none' and weight is None else None
        out = paddle.multiply(out, log_weight, name=pos_weight_name)
@@ -625,12 +613,12 @@ def margin_ranking_loss(input,
    fluid.data_feeder.check_variable_and_dtype(
        label, 'label', ['float32', 'float64'], 'margin_rank_loss')
-   out = paddle.elementwise_sub(other, input)
+   out = paddle.fluid.layers.elementwise_sub(other, input)
    out = paddle.multiply(out, label)
    if margin != 0.0:
        margin_var = out.block.create_var(dtype=out.dtype)
-       paddle.fill_constant([1], out.dtype, margin, out=margin_var)
+       paddle.fluid.layers.fill_constant([1], out.dtype, margin, out=margin_var)
        out = paddle.add(out, margin_var)
    result_out = helper.create_variable_for_type_inference(input.dtype)
@@ -735,13 +723,13 @@ def l1_loss(input, label, reduction='mean', name=None):
        label, 'label', ['float32', 'float64', 'int32', 'int64'], 'l1_loss')
    if reduction == 'sum':
-       unreduced = paddle.elementwise_sub(input, label, act='abs')
+       unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs')
        return paddle.sum(unreduced, name=name)
    elif reduction == 'mean':
-       unreduced = paddle.elementwise_sub(input, label, act='abs')
+       unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs')
        return paddle.mean(unreduced, name=name)
    else:
-       return paddle.elementwise_sub(input, label, act='abs', name=name)
+       return paddle.fluid.layers.elementwise_sub(input, label, act='abs', name=name)
def nll_loss(input,
@@ -1008,8 +996,8 @@ def mse_loss(input, label, reduction='mean', name=None):
        # static graph mode
        paddle.enable_static()
        mse_loss = paddle.nn.loss.MSELoss()
-       input = paddle.data(name="input", shape=[1])
-       label = paddle.data(name="label", shape=[1])
+       input = paddle.fluid.data(name="input", shape=[1])
+       label = paddle.fluid.data(name="label", shape=[1])
        place = paddle.CPUPlace()
        output = mse_loss(input,label)
@@ -1354,7 +1342,7 @@ def sigmoid_focal_loss(logit,
        label = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype='float32')
        one = paddle.to_tensor([1.], dtype='float32')
        fg_label = paddle.greater_equal(label, one)
-       fg_num = paddle.reduce_sum(paddle.cast(fg_label, dtype='float32'))
+       fg_num = paddle.fluid.layers.reduce_sum(paddle.cast(fg_label, dtype='float32'))
        output = paddle.nn.functional.sigmoid_focal_loss(logit, label, normalizer=fg_num)
        print(output.numpy())  # [0.65782464]
...
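Every hunk in this file points removed top-level aliases (`paddle.fill_constant`, `paddle.elementwise_sub`, `paddle.reduce_sum`, `paddle.data`) back at their `paddle.fluid` originals. A hedged dygraph sketch of the same substitution in user code (values illustrative):

import paddle
import paddle.fluid as fluid

paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0, 3.0])
y = paddle.to_tensor([1.5, 1.0, 3.5])
diff = fluid.layers.elementwise_sub(x, y, act='abs')  # formerly paddle.elementwise_sub
total = fluid.layers.reduce_sum(diff)                 # formerly paddle.reduce_sum
print(total.numpy())  # [2.] -> 0.5 + 1.0 + 0.5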
@@ -109,8 +109,8 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
    helper.append_op(
        type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
    eps = out.block.create_var(dtype=out.dtype)
-   paddle.fill_constant([1], out.dtype, epsilon, out=eps)
-   return paddle.elementwise_div(x, paddle.maximum(out, eps), name=name)
+   paddle.fluid.layers.fill_constant([1], out.dtype, epsilon, out=eps)
+   return paddle.fluid.layers.elementwise_div(x, paddle.maximum(out, eps), name=name)
def batch_norm(x,
...
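For reference, a minimal call of the public wrapper whose internals the hunk above patches (values illustrative):

import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.to_tensor([[3.0, 4.0]])
print(F.normalize(x, p=2, axis=1).numpy())  # rows rescaled to unit L2 norm: [[0.6, 0.8]]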
@@ -13,16 +13,12 @@
# limitations under the License.
# TODO: define pooling functions
- from ...fluid.layers import pool2d #DEFINE_ALIAS
- from ...fluid.layers import pool3d #DEFINE_ALIAS
from ...fluid import core
from ...fluid.framework import in_dygraph_mode
from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype
__all__ = [
-   'pool2d',
-   'pool3d',
    'avg_pool1d',
    'avg_pool2d',
    'avg_pool3d',
...
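With the `pool2d`/`pool3d` aliases dropped, the dimension-specific pools remain the public surface here. A hedged sketch (shapes illustrative):

import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.rand([1, 3, 32, 32])               # NCHW
y = F.avg_pool2d(x, kernel_size=2, stride=2)
print(y.shape)                                # [1, 3, 16, 16]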
- # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- from paddle.fluid.layers.rnn import rnn, birnn
- __all__ = ['rnn', 'birnn']
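The deleted module only re-exported `rnn` and `birnn`, which both remain importable from `paddle.fluid.layers.rnn`. A hedged static-graph sketch (cell choice and shapes are illustrative assumptions):

import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.rnn import rnn

paddle.enable_static()
x = fluid.data(name="x", shape=[None, 16, 8], dtype="float32")  # [batch, time, feature]
cell = fluid.layers.GRUCell(hidden_size=8)
outputs, final_states = rnn(cell, x)  # time_major defaults to False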
@@ -20,71 +20,44 @@ from ...fluid import dygraph_utils
import numpy as np
# TODO: define specitial functions used in computer vision task
- from ...fluid.layers import affine_channel #DEFINE_ALIAS
- from ...fluid.layers import anchor_generator #DEFINE_ALIAS
- from ...fluid.layers import bipartite_match #DEFINE_ALIAS
- from ...fluid.layers import box_clip #DEFINE_ALIAS
- from ...fluid.layers import box_coder #DEFINE_ALIAS
- from ...fluid.layers import box_decoder_and_assign #DEFINE_ALIAS
- from ...fluid.layers import collect_fpn_proposals #DEFINE_ALIAS
- from ...fluid.layers import deformable_roi_pooling #DEFINE_ALIAS
- from ...fluid.layers import density_prior_box #DEFINE_ALIAS
- from ...fluid.layers import detection_output #DEFINE_ALIAS
- from ...fluid.layers import distribute_fpn_proposals #DEFINE_ALIAS
- from ...fluid.layers import generate_mask_labels #DEFINE_ALIAS
- from ...fluid.layers import generate_proposal_labels #DEFINE_ALIAS
- from ...fluid.layers import generate_proposals #DEFINE_ALIAS
- from ...fluid.layers import prior_box #DEFINE_ALIAS
- from ...fluid.layers import prroi_pool #DEFINE_ALIAS
- from ...fluid.layers import psroi_pool #DEFINE_ALIAS
- from ...fluid.layers import roi_align #DEFINE_ALIAS
- from ...fluid.layers import roi_pool #DEFINE_ALIAS
- from ...fluid.layers import space_to_depth #DEFINE_ALIAS
- from ...fluid.layers import yolo_box #DEFINE_ALIAS
- from ...fluid.layers import yolov3_loss #DEFINE_ALIAS
- from ...fluid.layers import fsp_matrix #DEFINE_ALIAS
- from ...fluid.layers import image_resize_short #DEFINE_ALIAS
+ # from ...fluid.layers import affine_channel #DEFINE_ALIAS
+ # from ...fluid.layers import anchor_generator #DEFINE_ALIAS
+ # from ...fluid.layers import bipartite_match #DEFINE_ALIAS
+ # from ...fluid.layers import box_clip #DEFINE_ALIAS
+ # from ...fluid.layers import box_coder #DEFINE_ALIAS
+ # from ...fluid.layers import box_decoder_and_assign #DEFINE_ALIAS
+ # from ...fluid.layers import collect_fpn_proposals #DEFINE_ALIAS
+ # from ...fluid.layers import deformable_roi_pooling #DEFINE_ALIAS
+ # from ...fluid.layers import density_prior_box #DEFINE_ALIAS
+ # from ...fluid.layers import detection_output #DEFINE_ALIAS
+ # from ...fluid.layers import distribute_fpn_proposals #DEFINE_ALIAS
+ # from ...fluid.layers import generate_mask_labels #DEFINE_ALIAS
+ # from ...fluid.layers import generate_proposal_labels #DEFINE_ALIAS
+ # from ...fluid.layers import generate_proposals #DEFINE_ALIAS
+ # from ...fluid.layers import image_resize #DEFINE_ALIAS
+ # from ...fluid.layers import prior_box #DEFINE_ALIAS
+ # from ...fluid.layers import prroi_pool #DEFINE_ALIAS
+ # from ...fluid.layers import psroi_pool #DEFINE_ALIAS
+ # from ...fluid.layers import resize_bilinear #DEFINE_ALIAS
+ # from ...fluid.layers import resize_nearest #DEFINE_ALIAS
+ # from ...fluid.layers import resize_trilinear #DEFINE_ALIAS
+ # from ...fluid.layers import roi_align #DEFINE_ALIAS
+ # from ...fluid.layers import roi_pool #DEFINE_ALIAS
+ # from ...fluid.layers import space_to_depth #DEFINE_ALIAS
+ # from ...fluid.layers import yolo_box #DEFINE_ALIAS
+ # from ...fluid.layers import yolov3_loss #DEFINE_ALIAS
+ # from ...fluid.layers import fsp_matrix #DEFINE_ALIAS
+ # from ...fluid.layers import image_resize_short #DEFINE_ALIAS
# from ...fluid.layers import pixel_shuffle #DEFINE_ALIAS
- from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS
- from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS
- from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS
- from ...fluid.layers import shuffle_channel #DEFINE_ALIAS
+ # from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS
+ # from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS
+ # from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS
+ # from ...fluid.layers import shuffle_channel #DEFINE_ALIAS
__all__ = [
-   'affine_channel',
    'affine_grid',
-   'anchor_generator',
-   'bipartite_match',
-   'box_clip',
-   'box_coder',
-   'box_decoder_and_assign',
-   'collect_fpn_proposals',
-   # 'deformable_conv',
-   'deformable_roi_pooling',
-   'density_prior_box',
-   'detection_output',
-   'distribute_fpn_proposals',
-   'fsp_matrix',
-   'generate_mask_labels',
-   'generate_proposal_labels',
-   'generate_proposals',
    'grid_sample',
-   'image_resize_short',
-   # 'multi_box_head',
-   'pixel_shuffle',
-   'prior_box',
-   'prroi_pool',
-   'psroi_pool',
-   'retinanet_detection_output',
-   'retinanet_target_assign',
-   'roi_align',
-   'roi_perspective_transform',
-   'roi_pool',
-   'shuffle_channel',
-   'space_to_depth',
-   'yolo_box',
-   'yolov3_loss'
+   'pixel_shuffle'
]
...
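After the cleanup this module publicly exposes only `affine_grid`, `grid_sample`, and `pixel_shuffle`. A hedged sketch of the last one (shapes illustrative):

import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.rand([2, 9, 4, 4])             # channel count must be upscale_factor**2 * C_out
y = F.pixel_shuffle(x, upscale_factor=3)
print(y.shape)                            # [2, 1, 12, 12]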
... (16 more file diffs collapsed)