Unverified commit d05058d2 authored by chentianyu03, committed by GitHub

Remove and reorganize the alias of APIs (#27717)

* modify cond while_loop to paddle.static.nn.cond

* modify crop_tensor to paddle.crop

* modify Variable to paddle.static.Variable

* remove nn.beam_search, nn.beam_search_decode, nn.gather_tree

* remove bpr_loss, center_loss, rank_loss, smooth_l1, teacher_student_sigmoid_loss, edit_distance, sampled_softmax_with_cross_entropy in nn.functional

* remove apis in nn.functional.learn_rate.py

* remove pool2d, pool3d, adaptive_pool2d, adaptive_pool3d in nn.functional

* remove apis in nn.functional.vision

* remove erf, soft_relu in nn.functional.activation

* remove apis in nn.functional.extension

* remove nn.functional.rnn

* remove hash from nn.functional.lod

* remove row_conv from nn.functional.extension

* remove one_hot, pad2d, pad_constant_like from nn.functional.common

* remove nn.gather_tree, nn.BilinearTensorProduct, nn.Pool2D, nn.Pad2D

* remove apis from optimizer.__init__

* remove tensor.creation.fill_constant

* remove elementwise_mul in nn.functional.common and modify to paddle.multiply (see the migration sketch after this list)

* remove tensor.stat.reduce_mean

* remove reduce_all, reduce_any in tensor.logic

* remove apis in tensor.math

* remove apis in tensor.__init__

* remove has_inf, has_nan in tensor.search

* remove apis in framework.__init__

* remove apis in paddle.__init__

* remove apis in nn.functional.__init__

* modify removed alias apis to raw api in doc and unittests

* fix remove grid_sample bug

* delete alias api relations in doc

* reserve paddle.compat, paddle.sysconfig

* remove unittest for paddle.reduce_all, paddle.reduce_any

* recover paddle.save and paddle.load

* resolve conflicts

* fix sample code missing paddle.enable_static() bug

* fix to_string sample code error
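
The practical effect for users is that each removed alias must be replaced by the surviving canonical API. Below is a minimal, hypothetical migration sketch (assuming the 2.0-rc API surface this PR leaves behind; tensor values and variable names are illustrative):

    import paddle

    # Dynamic mode (the 2.0 default): the removed root-level alias
    # paddle.elementwise_mul is replaced by paddle.multiply.
    x = paddle.to_tensor([1.0, 2.0, 3.0])
    y = paddle.to_tensor([4.0, 5.0, 6.0])
    out = paddle.multiply(x, y)   # was: paddle.elementwise_mul(x, y)
    print(out.numpy())            # [ 4. 10. 18.]

    # Static-graph sample code must now opt in explicitly (2.0 defaults to
    # dynamic mode), and removed root-level aliases such as paddle.reduce_sum
    # fall back to the raw fluid API, as the diff below shows.
    paddle.enable_static()
    data = paddle.fluid.data(name="x", shape=[None, 3], dtype="float32")
    total = paddle.fluid.layers.reduce_sum(data)
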
Parent 6e5034e2
......@@ -712,7 +712,7 @@ void BindImperative(py::module *m_ptr) {
tmp.stop_gradient=False
inputs.append(tmp)
ret = paddle.sums(inputs)
loss = paddle.reduce_sum(ret)
loss = paddle.fluid.layers.reduce_sum(ret)
loss.backward()
print("Before clear_gradient {}".format(loss.grad))
loss.clear_gradient()
......
......@@ -59,10 +59,9 @@ from .tensor.random import bernoulli
from .tensor.attribute import rank #DEFINE_ALIAS
from .tensor.attribute import shape #DEFINE_ALIAS
from .tensor.creation import to_tensor #DEFINE_ALIAS
from .tensor.creation import crop_tensor #DEFINE_ALIAS
from .tensor.creation import diag #DEFINE_ALIAS
from .tensor.creation import eye #DEFINE_ALIAS
from .tensor.creation import fill_constant #DEFINE_ALIAS
# from .tensor.creation import fill_constant #DEFINE_ALIAS
# from .tensor.creation import get_tensor_from_selected_rows #DEFINE_ALIAS
from .tensor.creation import linspace #DEFINE_ALIAS
from .tensor.creation import ones #DEFINE_ALIAS
......@@ -103,8 +102,8 @@ from .tensor.logic import logical_not #DEFINE_ALIAS
from .tensor.logic import logical_or #DEFINE_ALIAS
from .tensor.logic import logical_xor #DEFINE_ALIAS
from .tensor.logic import not_equal #DEFINE_ALIAS
from .tensor.logic import reduce_all #DEFINE_ALIAS
from .tensor.logic import reduce_any #DEFINE_ALIAS
# from .tensor.logic import reduce_all #DEFINE_ALIAS
# from .tensor.logic import reduce_any #DEFINE_ALIAS
from .tensor.logic import allclose #DEFINE_ALIAS
from .tensor.logic import equal_all #DEFINE_ALIAS
# from .tensor.logic import isnan #DEFINE_ALIAS
......@@ -144,12 +143,12 @@ from .tensor.math import ceil #DEFINE_ALIAS
from .tensor.math import cos #DEFINE_ALIAS
from .tensor.math import cosh #DEFINE_ALIAS
from .tensor.math import cumsum #DEFINE_ALIAS
from .tensor.math import elementwise_add #DEFINE_ALIAS
from .tensor.math import elementwise_div #DEFINE_ALIAS
from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
from .tensor.math import elementwise_mod #DEFINE_ALIAS
from .tensor.math import elementwise_pow #DEFINE_ALIAS
from .tensor.math import elementwise_sub #DEFINE_ALIAS
# from .tensor.math import elementwise_add #DEFINE_ALIAS
# from .tensor.math import elementwise_div #DEFINE_ALIAS
# from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
# from .tensor.math import elementwise_mod #DEFINE_ALIAS
# from .tensor.math import elementwise_pow #DEFINE_ALIAS
# from .tensor.math import elementwise_sub #DEFINE_ALIAS
from .tensor.math import exp #DEFINE_ALIAS
from .tensor.math import floor #DEFINE_ALIAS
from .tensor.math import increment #DEFINE_ALIAS
......@@ -157,10 +156,10 @@ from .tensor.math import log #DEFINE_ALIAS
from .tensor.math import multiplex #DEFINE_ALIAS
from .tensor.math import pow #DEFINE_ALIAS
from .tensor.math import reciprocal #DEFINE_ALIAS
from .tensor.math import reduce_max #DEFINE_ALIAS
from .tensor.math import reduce_min #DEFINE_ALIAS
from .tensor.math import reduce_prod #DEFINE_ALIAS
from .tensor.math import reduce_sum #DEFINE_ALIAS
# from .tensor.math import reduce_max #DEFINE_ALIAS
# from .tensor.math import reduce_min #DEFINE_ALIAS
# from .tensor.math import reduce_prod #DEFINE_ALIAS
# from .tensor.math import reduce_sum #DEFINE_ALIAS
from .tensor.math import round #DEFINE_ALIAS
from .tensor.math import rsqrt #DEFINE_ALIAS
from .tensor.math import scale #DEFINE_ALIAS
......@@ -190,7 +189,7 @@ from .tensor.math import logsumexp #DEFINE_ALIAS
from .tensor.math import inverse #DEFINE_ALIAS
from .tensor.math import log1p #DEFINE_ALIAS
from .tensor.math import erf #DEFINE_ALIAS
from .tensor.math import addcmul #DEFINE_ALIAS
# from .tensor.math import addcmul #DEFINE_ALIAS
from .tensor.math import addmm #DEFINE_ALIAS
from .tensor.math import clip #DEFINE_ALIAS
from .tensor.math import trace #DEFINE_ALIAS
......@@ -210,8 +209,8 @@ from .tensor.random import randperm #DEFINE_ALIAS
from .tensor.search import argmax #DEFINE_ALIAS
from .tensor.search import argmin #DEFINE_ALIAS
from .tensor.search import argsort #DEFINE_ALIAS
from .tensor.search import has_inf #DEFINE_ALIAS
from .tensor.search import has_nan #DEFINE_ALIAS
# from .tensor.search import has_inf #DEFINE_ALIAS
# from .tensor.search import has_nan #DEFINE_ALIAS
from .tensor.search import masked_select #DEFINE_ALIAS
from .tensor.search import topk #DEFINE_ALIAS
from .tensor.search import where #DEFINE_ALIAS
......@@ -224,9 +223,8 @@ from .tensor.to_string import set_printoptions
from .framework.random import manual_seed #DEFINE_ALIAS
from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
from .framework import Variable #DEFINE_ALIAS
from .framework import ParamAttr #DEFINE_ALIAS
from .framework import create_global_var #DEFINE_ALIAS
# from .framework import create_global_var #DEFINE_ALIAS
from .framework import create_parameter #DEFINE_ALIAS
from .framework import CPUPlace #DEFINE_ALIAS
from .framework import CUDAPlace #DEFINE_ALIAS
......@@ -243,10 +241,10 @@ from .framework import get_default_dtype #DEFINE_ALIAS
from .tensor.search import index_sample #DEFINE_ALIAS
from .tensor.stat import mean #DEFINE_ALIAS
from .tensor.stat import reduce_mean #DEFINE_ALIAS
# from .tensor.stat import reduce_mean #DEFINE_ALIAS
from .tensor.stat import std #DEFINE_ALIAS
from .tensor.stat import var #DEFINE_ALIAS
from .fluid.data import data
# from .fluid.data import data
from .tensor.stat import numel #DEFINE_ALIAS
from .device import get_cudnn_version
from .device import set_device
......@@ -262,6 +260,8 @@ from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS
from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS
from .fluid.dygraph.base import no_grad_ as no_grad #DEFINE_ALIAS
from .fluid.layers import crop_tensor as crop #DEFINE_ALIAS
from . import jit
from . import static
......
......@@ -56,7 +56,7 @@ class GradScaler(AmpScaler):
data = paddle.rand([10, 3, 32, 32])
with paddle.amp.auto_cast():
conv = model(data)
loss = paddle.reduce_mean(conv)
loss = paddle.fluid.layers.reduce_mean(conv)
scaled = scaler.scale(loss) # scale the loss
scaled.backward() # do backward
scaler.minimize(optimizer, scaled) # update parameters
......@@ -96,7 +96,7 @@ class GradScaler(AmpScaler):
data = paddle.rand([10, 3, 32, 32])
with paddle.amp.auto_cast():
conv = model(data)
loss = paddle.reduce_mean(conv)
loss = paddle.fluid.layers.reduce_mean(conv)
scaled = scaler.scale(loss) # scale the loss
scaled.backward() # do backward
scaler.minimize(optimizer, scaled) # update parameters
......@@ -128,7 +128,7 @@ class GradScaler(AmpScaler):
data = paddle.rand([10, 3, 32, 32])
with paddle.amp.auto_cast():
conv = model(data)
loss = paddle.reduce_mean(conv)
loss = paddle.fluid.layers.reduce_mean(conv)
scaled = scaler.scale(loss) # scale the loss
scaled.backward() # do backward
scaler.minimize(optimizer, scaled) # update parameters
......
......@@ -439,7 +439,7 @@ def barrier(group=0):
paddle.distributed.barrier()
"""
op_type = 'barrier'
temp = paddle.fill_constant([1], dtype="int32", value="1")
temp = fill_constant([1], dtype="int32", value="1")
if in_dygraph_mode():
return core.ops.barrier(temp, temp, 'ring_id', group)
if not isinstance(group, int):
......
......@@ -25,9 +25,9 @@ from .fluid.layers import control_flow
from .fluid.layers import tensor
from .fluid.layers import ops
from .fluid.layers import nn
from .fluid.layers import elementwise_mul, elementwise_div, elementwise_add, elementwise_sub
from .fluid import core
from .fluid.framework import in_dygraph_mode
from .tensor.math import elementwise_mul, elementwise_div, elementwise_add, elementwise_sub
from .tensor import arange, gather_nd, concat, multinomial
import math
import numpy as np
......
......@@ -480,7 +480,7 @@ def grad(outputs,
paddle.disable_static()
def test_dygraph_grad(grad_outputs=None):
x = paddle.fill_constant(shape=[1], value=2.0, dtype='float32')
x = paddle.fluid.layers.fill_constant(shape=[1], value=2.0, dtype='float32')
x.stop_gradient = False
y1 = x * x
......@@ -503,7 +503,7 @@ def grad(outputs,
return dx.numpy()
grad_value = paddle.fill_constant(shape=[1], value=4.0, dtype='float32')
grad_value = paddle.fluid.layers.fill_constant(shape=[1], value=4.0, dtype='float32')
# dy1 = [1], dy2 = [1]
print(test_dygraph_grad(None)) # [7.]
......@@ -515,7 +515,7 @@ def grad(outputs,
print(test_dygraph_grad([grad_value, None])) # [19.]
# dy1 = [3], dy2 = [4]
grad_y1 = paddle.fill_constant(shape=[1], value=3.0, dtype='float32')
grad_y1 = paddle.fluid.layers.fill_constant(shape=[1], value=3.0, dtype='float32')
print(test_dygraph_grad([grad_y1, grad_value])) # [24.]
'''
......
......@@ -87,7 +87,7 @@ def create_static_variable_gast_node(name):
def create_fill_constant_node(name, value):
func_code = "{} = paddle.fill_constant(shape=[1], ".format(name)
func_code = "{} = paddle.fluid.layers.fill_constant(shape=[1], ".format(name)
if isinstance(value, bool):
func_code += "dtype='bool', value={})".format(value)
return gast.parse(func_code).body[0]
......
......@@ -702,9 +702,6 @@ class Conv3DTranspose(layers.Layer):
class Pool2D(layers.Layer):
"""
:alias_main: paddle.nn.Pool2D
:alias: paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D
:old_api: paddle.fluid.dygraph.Pool2D
This interface is used to construct a callable object of the ``Pool2D`` class.
For more details, refer to code examples.
......@@ -2354,9 +2351,6 @@ class PRelu(layers.Layer):
class BilinearTensorProduct(layers.Layer):
"""
:alias_main: paddle.nn.BilinearTensorProduct
:alias: paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct
:old_api: paddle.fluid.dygraph.BilinearTensorProduct
**Add Bilinear Tensor Product Layer**
......
......@@ -163,7 +163,7 @@ def monkey_patch_varbase():
tmp.stop_gradient=False
inputs.append(tmp)
ret = paddle.sums(inputs)
loss = paddle.reduce_sum(ret)
loss = paddle.fluid.layers.reduce_sum(ret)
loss.backward()
"""
......
......@@ -543,7 +543,7 @@ def name_scope(prefix=None):
import paddle
paddle.enable_static()
with paddle.static.name_scope("s1"):
a = paddle.data(name='data', shape=[None, 1], dtype='int32')
a = paddle.fluid.data(name='data', shape=[None, 1], dtype='int32')
b = a + 1
with paddle.static.name_scope("s2"):
c = b * 1
......@@ -1193,7 +1193,7 @@ class Variable(object):
tmp.stop_gradient=False
inputs.append(tmp)
ret = paddle.sums(inputs)
loss = paddle.reduce_sum(ret)
loss = paddle.fluid.layers.reduce_sum(ret)
loss.backward()
"""
......@@ -1343,7 +1343,9 @@ class Variable(object):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
......@@ -5355,8 +5357,8 @@ def default_startup_program():
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program=main_program, startup_program=startup_program):
x = paddle.data(name="x", shape=[-1, 784], dtype='float32')
y = paddle.data(name="y", shape=[-1, 1], dtype='int32')
x = paddle.fluid.data(name="x", shape=[-1, 784], dtype='float32')
y = paddle.fluid.data(name="y", shape=[-1, 1], dtype='int32')
z = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")
print("main program is: {}".format(paddle.static.default_main_program()))
......@@ -5370,7 +5372,7 @@ def default_main_program():
This API can be used to get the ``default main program``, which stores the
descriptions of Ops and tensors.
For example ``z = paddle.elementwise_add(x, y)`` will create a new ``elementwise_add``
For example ``z = paddle.fluid.layers.elementwise_add(x, y)`` will create a new ``elementwise_add``
Op and a new ``z`` tensor, and they will be recorded in ``default main program`` .
The ``default main program`` is the default value for ``Program`` parameter in
......@@ -5389,15 +5391,15 @@ def default_main_program():
paddle.enable_static()
# Sample Network:
data = paddle.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
label = paddle.data(name='label', shape=[None, 1], dtype='int64')
data = paddle.fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
label = paddle.fluid.data(name='label', shape=[None, 1], dtype='int64')
conv1 = paddle.static.nn.conv2d(data, 4, 5, 1, act=None)
bn1 = paddle.static.nn.batch_norm(conv1, act='relu')
pool1 = paddle.nn.functional.pool2d(bn1, 2, 'max', 2)
pool1 = paddle.fluid.layers.pool2d(bn1, 2, 'max', 2)
conv2 = paddle.static.nn.conv2d(pool1, 16, 5, 1, act=None)
bn2 = paddle.static.nn.batch_norm(conv2, act='relu')
pool2 = paddle.nn.functional.pool2d(bn2, 2, 'max', 2)
pool2 = paddle.fluid.layers.pool2d(bn2, 2, 'max', 2)
fc1 = paddle.static.nn.fc(x=pool2, size=50, activation='relu')
fc2 = paddle.static.nn.fc(x=fc1, size=102, activation='softmax')
......
......@@ -1110,9 +1110,6 @@ def assign_skip_lod_tensor_array(input, output):
def while_loop(cond, body, loop_vars, is_test=False, name=None):
"""
:api_attr: Static Graph
:alias_main: paddle.nn.while_loop
:alias: paddle.nn.while_loop,paddle.nn.control_flow.while_loop
:old_api: paddle.fluid.layers.while_loop
while_loop is one of the control-flow APIs. It repeats `body` until `cond` returns False.
......@@ -1151,6 +1148,9 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle
paddle.enable_static()
def cond(i, ten):
return i < ten
......@@ -2506,21 +2506,21 @@ def case(pred_fn_pairs, default=None, name=None):
paddle.enable_static()
def fn_1():
return paddle.fill_constant(shape=[1, 2], dtype='float32', value=1)
return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)
def fn_2():
return paddle.fill_constant(shape=[2, 2], dtype='int32', value=2)
return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)
def fn_3():
return paddle.fill_constant(shape=[3], dtype='int32', value=3)
return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
with paddle.static.program_guard(main_program, startup_program):
x = paddle.fill_constant(shape=[1], dtype='float32', value=0.3)
y = paddle.fill_constant(shape=[1], dtype='float32', value=0.1)
z = paddle.fill_constant(shape=[1], dtype='float32', value=0.2)
x = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.3)
y = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.1)
z = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.2)
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
......@@ -3626,19 +3626,19 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
paddle.enable_static()
def fn_1():
return paddle.fill_constant(shape=[1, 2], dtype='float32', value=1)
return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)
def fn_2():
return paddle.fill_constant(shape=[2, 2], dtype='int32', value=2)
return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)
def fn_3():
return paddle.fill_constant(shape=[3], dtype='int32', value=3)
return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
with paddle.static.program_guard(main_program, startup_program):
index_1 = paddle.fill_constant(shape=[1], dtype='int32', value=1)
index_2 = paddle.fill_constant(shape=[1], dtype='int32', value=2)
index_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
index_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=2)
out_1 = paddle.static.nn.switch_case(
branch_index=index_1,
......
......@@ -629,9 +629,6 @@ def detection_output(loc,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing the following steps:
......@@ -700,6 +697,9 @@ def detection_output(loc,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
......@@ -822,9 +822,6 @@ def box_coder(prior_box,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
......@@ -911,6 +908,8 @@ def box_coder(prior_box,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
......@@ -1013,9 +1012,6 @@ def yolov3_loss(x,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
......@@ -1060,6 +1056,8 @@ def yolov3_loss(x,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
......@@ -1140,9 +1138,6 @@ def yolo_box(x,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
......@@ -1175,6 +1170,8 @@ def yolo_box(x,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
......@@ -1319,9 +1316,6 @@ def bipartite_match(dist_matrix,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
......@@ -1413,9 +1407,6 @@ def target_assign(input,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
......@@ -1484,6 +1475,8 @@ def target_assign(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(
name='x',
shape=[4, 20, 4],
......@@ -1778,9 +1771,6 @@ def prior_box(input,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
......@@ -1832,6 +1822,8 @@ def prior_box(input,
#declarative mode
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
......@@ -1939,10 +1931,6 @@ def density_prior_box(input,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
......@@ -2008,6 +1996,8 @@ def density_prior_box(input,
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
......@@ -2408,9 +2398,6 @@ def anchor_generator(input,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
......@@ -2457,6 +2444,9 @@ def anchor_generator(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
......@@ -2613,9 +2603,6 @@ def generate_proposal_labels(rpn_rois,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
......@@ -2738,9 +2725,6 @@ def generate_proposal_labels(rpn_rois,
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
......@@ -2897,9 +2881,6 @@ def generate_proposals(scores,
return_rois_num=False,
name=None):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
......@@ -2965,6 +2946,8 @@ def generate_proposals(scores,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
......@@ -3036,9 +3019,6 @@ def generate_proposals(scores,
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info.
For each input box, the formula is given as follows:
......@@ -3079,6 +3059,8 @@ def box_clip(input, im_info, name=None):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
......@@ -3265,9 +3247,6 @@ def multiclass_nms(bboxes,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
......@@ -3363,6 +3342,8 @@ def multiclass_nms(bboxes,
import paddle.fluid as fluid
import paddle
paddle.enable_static()
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
......@@ -3674,9 +3655,6 @@ def distribute_fpn_proposals(fpn_rois,
rois_num=None,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, all proposals need to be distributed into different FPN
......@@ -3732,6 +3710,8 @@ def distribute_fpn_proposals(fpn_rois,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
......@@ -3798,9 +3778,6 @@ def box_decoder_and_assign(prior_box,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
......@@ -3825,6 +3802,8 @@ def box_decoder_and_assign(prior_box,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
......@@ -3874,9 +3853,6 @@ def collect_fpn_proposals(multi_rois,
rois_num_per_level=None,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concatenate multi-level RoIs
(Regions of Interest) and select N RoIs with respect to multi_scores.
......@@ -3922,6 +3898,8 @@ def collect_fpn_proposals(multi_rois,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
multi_rois = []
multi_scores = []
for i in range(4):
......
......@@ -52,9 +52,6 @@ def _decay_step_counter(begin=0):
def noam_decay(d_model, warmup_steps, learning_rate=1.0):
"""
:alias_main: paddle.nn.functional.noam_decay
:alias: paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay
:old_api: paddle.fluid.layers.noam_decay
Noam decay method. The numpy implementation of noam decay is as follows.
......@@ -115,9 +112,6 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0):
def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
:alias_main: paddle.nn.functional.exponential_decay
:alias: paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay
:old_api: paddle.fluid.layers.exponential_decay
Applies exponential decay to the learning rate.
......@@ -149,6 +143,9 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
base_lr = 0.1
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.exponential_decay(
......@@ -176,9 +173,6 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
:alias_main: paddle.nn.functional.natural_exp_decay
:alias: paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay
:old_api: paddle.fluid.layers.natural_exp_decay
Applies natural exponential decay to the initial learning rate.
......@@ -210,6 +204,9 @@ Applies natural exponential decay to the initial learning rate.
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
base_lr = 0.1
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.natural_exp_decay(
......@@ -237,9 +234,6 @@ Applies natural exponential decay to the initial learning rate.
def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
:alias_main: paddle.nn.functional.inverse_time_decay
:alias: paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay
:old_api: paddle.fluid.layers.inverse_time_decay
Applies inverse time decay to the initial learning rate.
......@@ -271,6 +265,8 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
base_lr = 0.1
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.inverse_time_decay(
......@@ -302,10 +298,6 @@ def polynomial_decay(learning_rate,
power=1.0,
cycle=False):
"""
:alias_main: paddle.nn.functional.polynomial_decay
:alias: paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay
:old_api: paddle.fluid.layers.polynomial_decay
Applies polynomial decay to the initial learning rate.
.. code-block:: text
......@@ -371,9 +363,6 @@ def polynomial_decay(learning_rate,
def piecewise_decay(boundaries, values):
"""
:alias_main: paddle.nn.functional.piecewise_decay
:alias: paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay
:old_api: paddle.fluid.layers.piecewise_decay
Applies piecewise decay to the initial learning rate.
......@@ -401,6 +390,8 @@ Applies piecewise decay to the initial learning rate.
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
boundaries = [10000, 20000]
values = [1.0, 0.5, 0.1]
optimizer = fluid.optimizer.Momentum(
......@@ -450,9 +441,6 @@ Applies piecewise decay to the initial learning rate.
def cosine_decay(learning_rate, step_each_epoch, epochs):
"""
:alias_main: paddle.nn.functional.cosine_decay
:alias: paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay
:old_api: paddle.fluid.layers.cosine_decay
Applies cosine decay to the learning rate.
......@@ -499,9 +487,6 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
"""
:alias_main: paddle.nn.functional.linear_lr_warmup
:alias: paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup
:old_api: paddle.fluid.layers.linear_lr_warmup
This operator uses the linear learning rate warm-up strategy to adjust the learning rate preliminarily before the normal learning rate scheduling.
For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/abs/1812.01187>`_
......
......@@ -59,9 +59,6 @@ def center_loss(input,
update_center=True):
"""
:api_attr: Static Graph
:alias_main: paddle.nn.functional.center_loss
:alias: paddle.nn.functional.center_loss,paddle.nn.functional.loss.center_loss
:old_api: paddle.fluid.layers.center_loss
**Center loss Cost layer**
......@@ -92,6 +89,8 @@ def center_loss(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
input = fluid.data(name='x',shape=[20,30],dtype='float32')
label = fluid.data(name='y',shape=[20,1],dtype='int64')
......@@ -153,9 +152,6 @@ def center_loss(input,
def bpr_loss(input, label, name=None):
"""
:alias_main: paddle.nn.functional.bpr_loss
:alias: paddle.nn.functional.bpr_loss,paddle.nn.functional.loss.bpr_loss
:old_api: paddle.fluid.layers.bpr_loss
**Bayesian Personalized Ranking Loss Operator**
......@@ -183,6 +179,9 @@ def bpr_loss(input, label, name=None):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
neg_size = 10
label = fluid.data(
......@@ -1309,9 +1308,6 @@ def softmax_with_cross_entropy(logits,
def rank_loss(label, left, right, name=None):
"""
:alias_main: paddle.nn.functional.rank_loss
:alias: paddle.nn.functional.rank_loss,paddle.nn.functional.loss.rank_loss
:old_api: paddle.fluid.layers.rank_loss
This operator implements the sort loss layer in the RankNet model. RankNet is a pairwise ranking model
with a training sample consisting of a pair of documents (A and B). The label (P)
......@@ -1349,6 +1345,8 @@ def rank_loss(label, left, right, name=None):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
label = fluid.data(name="label", shape=[-1, 1], dtype="float32")
left = fluid.data(name="left", shape=[-1, 1], dtype="float32")
right = fluid.data(name="right", shape=[-1, 1], dtype="float32")
......@@ -1491,9 +1489,6 @@ def teacher_student_sigmoid_loss(input,
soft_max_up_bound=15.0,
soft_max_lower_bound=-15.0):
"""
:alias_main: paddle.nn.functional.teacher_student_sigmoid_loss
:alias: paddle.nn.functional.teacher_student_sigmoid_loss,paddle.nn.functional.loss.teacher_student_sigmoid_loss
:old_api: paddle.fluid.layers.teacher_student_sigmoid_loss
**Teacher Student Log Loss Layer**
......@@ -1521,7 +1516,8 @@ def teacher_student_sigmoid_loss(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
batch_size = 64
label = fluid.data(
name="label", shape=[batch_size, 1], dtype="int64")
......
This diff is collapsed.
......@@ -488,7 +488,7 @@ def rnn(cell,
inputs = paddle.rand((4, 23, 16))
prev_h = paddle.randn((4, 32))
outputs, final_states = paddle.nn.functional.rnn(cell, inputs, prev_h)
outputs, final_states = paddle.fluid.layers.rnn(cell, inputs, prev_h)
"""
if in_dygraph_mode():
......@@ -711,7 +711,7 @@ def birnn(cell_fw,
hf, cf = paddle.rand((4, 32)), paddle.rand((4, 32))
hb, cb = paddle.rand((4, 32)), paddle.rand((4, 32))
initial_states = ((hf, cf), (hb, cb))
outputs, final_states = paddle.nn.functional.birnn(
outputs, final_states = paddle.fluid.layers.birnn(
cell_fw, cell_bw, inputs, initial_states)
"""
......@@ -3046,9 +3046,6 @@ def beam_search(pre_ids,
name=None,
return_parent_idx=False):
"""
:alias_main: paddle.nn.beam_search
:alias: paddle.nn.beam_search,paddle.nn.decode.beam_search
:old_api: paddle.fluid.layers.beam_search
Beam search is a classical algorithm for selecting candidate words in a
machine translation task.
......@@ -3126,6 +3123,8 @@ def beam_search(pre_ids,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# Suppose `probs` contains predicted results from the computation
# cell and `pre_ids` and `pre_scores` are the outputs of beam_search
......@@ -3197,9 +3196,6 @@ def beam_search(pre_ids,
def beam_search_decode(ids, scores, beam_size, end_id, name=None):
"""
:alias_main: paddle.nn.beam_search_decode
:alias: paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode
:old_api: paddle.fluid.layers.beam_search_decode
This operator is used after beam search has completed. It constructs the
full predicted sequences for each sample by walking back along the search
......@@ -3246,7 +3242,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# Suppose `ids` and `scores` are LodTensorArray variables storing
# the selected ids and scores of all steps
ids = fluid.layers.create_array(dtype='int64')
......
......@@ -605,8 +605,6 @@ def assign(input, output=None):
def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
"""
:alias_main: paddle.fill_constant
:alias: paddle.tensor.fill_constant, paddle.tensor.creation.fill_constant
This OP creates a Tensor with specified `shape` and `dtype`, and
initializes it with a constant specified by `value`.
......@@ -715,7 +713,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
return out
@deprecated(since='1.8.0', update_to="paddle.fill_constant")
@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(input,
shape,
......@@ -1228,7 +1226,7 @@ def has_inf(x):
import paddle
data = paddle.randn(shape=[4, 32, 32], dtype="float32")
res = paddle.has_inf(data)
res = paddle.fluid.layers.has_inf(data)
# [False]
"""
......@@ -1257,7 +1255,7 @@ def has_nan(x):
import paddle
data = paddle.randn(shape=[2,3], dtype="float32")
res = paddle.has_nan(data)
res = paddle.fluid.layers.has_nan(data)
# [False]
"""
......
......@@ -851,6 +851,9 @@ class DetectionMAP(object):
import paddle.fluid as fluid
import paddle
paddle.enable_static()
batch_size = None # can be any size
image_boxs_num = 10
bounding_bboxes_num = 21
......
......@@ -105,7 +105,7 @@ class ReduceMeanLayer(object):
"""
operation
"""
mean = paddle.reduce_mean(input)
mean = paddle.fluid.layers.reduce_mean(input)
return mean
......@@ -181,7 +181,7 @@ class ElementwiseSubLayer(object):
"""
operation
"""
sub = paddle.elementwise_sub(x, y)
sub = paddle.fluid.layers.elementwise_sub(x, y)
return sub
......@@ -203,7 +203,7 @@ class ConstantLayer(object):
shape = list(shape)
input_shape = paddle.shape(input)
shape[0] = input_shape[0]
constant = paddle.fill_constant(shape, dtype, value)
constant = paddle.fluid.layers.fill_constant(shape, dtype, value)
return constant
......@@ -473,8 +473,8 @@ class BOW(paddle.nn.Layer):
right_emb = paddle.reshape(
right_emb, shape=[-1, self.seq_len, self.bow_dim])
bow_left = paddle.reduce_sum(left_emb, dim=1)
bow_right = paddle.reduce_sum(right_emb, dim=1)
bow_left = paddle.fluid.layers.reduce_sum(left_emb, dim=1)
bow_right = paddle.fluid.layers.reduce_sum(right_emb, dim=1)
softsign_layer = SoftsignLayer()
left_soft = softsign_layer.ops(bow_left)
right_soft = softsign_layer.ops(bow_right)
......
......@@ -64,9 +64,9 @@ def get_source_code(func):
class StaticCode1():
# TODO: Transform return statement
def dyfunc_with_if_else(x_v, label=None):
__return_1 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
__return_0 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
__return_value_init_0 = paddle.fill_constant(
__return_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
__return_0 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
__return_value_init_0 = paddle.fluid.layers.fill_constant(
shape=[1], dtype='float64', value=0.0)
__return_value_0 = __return_value_init_0
......@@ -84,7 +84,7 @@ class StaticCode1():
def true_fn_1(__return_0, __return_value_0, label, x_v):
loss = fluid.layers.cross_entropy(x_v, label)
__return_0 = paddle.fill_constant(
__return_0 = paddle.fluid.layers.fill_constant(
shape=[1], dtype='bool', value=True)
__return_value_0 = loss
return __return_0, __return_value_0
......@@ -98,7 +98,7 @@ class StaticCode1():
(__return_0, __return_value_0), (__return_0, __return_value_0)))
def true_fn_2(__return_1, __return_value_0, x_v):
__return_1 = paddle.fill_constant(
__return_1 = paddle.fluid.layers.fill_constant(
shape=[1], dtype='bool', value=True)
__return_value_0 = x_v
return __return_1, __return_value_0
......@@ -116,9 +116,9 @@ class StaticCode1():
class StaticCode2():
# TODO: Transform return statement
def dyfunc_with_if_else(x_v, label=None):
__return_3 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
__return_2 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
__return_value_init_1 = paddle.fill_constant(
__return_3 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
__return_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
__return_value_init_1 = paddle.fluid.layers.fill_constant(
shape=[1], dtype='float64', value=0.0)
__return_value_1 = __return_value_init_1
......@@ -136,7 +136,7 @@ class StaticCode2():
def true_fn_4(__return_2, __return_value_1, label, x_v):
loss = fluid.layers.cross_entropy(x_v, label)
__return_2 = paddle.fill_constant(
__return_2 = paddle.fluid.layers.fill_constant(
shape=[1], dtype='bool', value=True)
__return_value_1 = loss
return __return_2, __return_value_1
......@@ -150,7 +150,7 @@ class StaticCode2():
(__return_2, __return_value_1), (__return_2, __return_value_1)))
def true_fn_5(__return_3, __return_value_1, x_v):
__return_3 = paddle.fill_constant(
__return_3 = paddle.fluid.layers.fill_constant(
shape=[1], dtype='bool', value=True)
__return_value_1 = x_v
return __return_3, __return_value_1
......
......@@ -187,8 +187,8 @@ class PtbModel(paddle.nn.Layer):
loss = paddle.nn.functional.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = paddle.reduce_mean(loss, dim=[0])
loss = paddle.reduce_sum(loss)
loss = paddle.fluid.layers.reduce_mean(loss, dim=[0])
loss = paddle.fluid.layers.reduce_sum(loss)
return loss, last_hidden, last_cell
......
......@@ -153,7 +153,7 @@ class ResNet(paddle.nn.Layer):
self.conv = ConvBNLayer(
num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
self.pool2d_max = paddle.nn.Pool2D(
self.pool2d_max = paddle.fluid.dygraph.Pool2D(
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
self.bottleneck_block_list = []
......@@ -171,7 +171,7 @@ class ResNet(paddle.nn.Layer):
self.bottleneck_block_list.append(bottleneck_block)
shortcut = True
self.pool2d_avg = paddle.nn.Pool2D(
self.pool2d_avg = paddle.fluid.dygraph.Pool2D(
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1
......
......@@ -51,24 +51,24 @@ class TestDataLayerNotCheck(unittest.TestCase):
class TestVariableTransFunc(unittest.TestCase):
def test_create_fill_constant_node(self):
node = create_fill_constant_node("a", 1.0)
source = "a = paddle.fill_constant(shape=[1], dtype='float64', value=1.0)"
source = "a = paddle.fluid.layers.fill_constant(shape=[1], dtype='float64', value=1.0)"
self.assertEqual(ast_to_source_code(node).strip(), source)
node = create_fill_constant_node("b", True)
source = "b = paddle.fill_constant(shape=[1], dtype='bool', value=True)"
source = "b = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)"
self.assertEqual(ast_to_source_code(node).strip(), source)
if six.PY2:
node = create_fill_constant_node("c", 214)
source = "c = paddle.fill_constant(shape=[1], dtype='int32', value=214)"
source = "c = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=214)"
self.assertEqual(ast_to_source_code(node).strip(), source)
node = create_fill_constant_node("d", long(10086))
source = "d = paddle.fill_constant(shape=[1], dtype='int64', value=10086)"
source = "d = paddle.fluid.layers.fill_constant(shape=[1], dtype='int64', value=10086)"
self.assertEqual(ast_to_source_code(node).strip(), source)
else:
node = create_fill_constant_node("c", 4293)
source = "c = paddle.fill_constant(shape=[1], dtype='int64', value=4293)"
source = "c = paddle.fluid.layers.fill_constant(shape=[1], dtype='int64', value=4293)"
self.assertEqual(ast_to_source_code(node).strip(), source)
self.assertIsNone(create_fill_constant_node("e", None))
......
......@@ -40,9 +40,9 @@ class SquaredMatSubFusePassTest(InferencePassTest):
matmul_ab_square = paddle.square(matmul_ab)
matmul_square_ab = paddle.matmul(data_a_square, data_b_square)
scale = paddle.fill_constant(shape=[1], value=0.5, dtype='float32')
scale = paddle.fluid.layers.fill_constant(shape=[1], value=0.5, dtype='float32')
sub_val = paddle.elementwise_sub(matmul_ab_square, matmul_square_ab)
sub_val = paddle.fluid.layers.elementwise_sub(matmul_ab_square, matmul_square_ab)
squared_mat_sub_out = fluid.layers.elementwise_mul(sub_val, scale)
self.feeds = {
......
......@@ -26,7 +26,7 @@ import paddle.fluid as fluid
import paddle.fluid.dygraph as dygraph
from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer
from paddle.nn import Conv2d, Pool2D, Linear, SyncBatchNorm
from paddle.nn import Conv2d, Linear, SyncBatchNorm
from paddle.fluid.dygraph.base import to_variable
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
......
......@@ -70,10 +70,10 @@ class TestSimpleRNNCell(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.data(
init_h = paddle.fluid.data(
"init_h", [-1, 32],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data, init_h)
......@@ -98,7 +98,7 @@ class TestSimpleRNNCell(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data)
......@@ -166,10 +166,10 @@ class TestGRUCell(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.data(
init_h = paddle.fluid.data(
"init_h", [-1, 32],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data, init_h)
......@@ -194,7 +194,7 @@ class TestGRUCell(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data)
......@@ -263,13 +263,13 @@ class TestLSTMCell(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.data(
init_h = paddle.fluid.data(
"init_h", [-1, 32],
dtype=paddle.framework.get_default_dtype())
init_c = paddle.data(
init_c = paddle.fluid.data(
"init_c", [-1, 32],
dtype=paddle.framework.get_default_dtype())
y, (h, c) = rnn2(x_data, (init_h, init_c))
......@@ -295,7 +295,7 @@ class TestLSTMCell(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
y, (h, c) = rnn2(x_data)
......
......@@ -81,10 +81,10 @@ class TestSimpleRNN(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.data(
init_h = paddle.fluid.data(
"init_h", [2 * self.num_directions, -1, 32],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data, init_h)
......@@ -112,7 +112,7 @@ class TestSimpleRNN(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data)
......@@ -142,10 +142,10 @@ class TestSimpleRNN(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
seq_len = paddle.data("seq_len", [-1], dtype="int64")
seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
......@@ -226,10 +226,10 @@ class TestGRU(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.data(
init_h = paddle.fluid.data(
"init_h", [2 * self.num_directions, -1, 32],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data, init_h)
......@@ -257,7 +257,7 @@ class TestGRU(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data)
......@@ -287,10 +287,10 @@ class TestGRU(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
seq_len = paddle.data("seq_len", [-1], dtype="int64")
seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
......@@ -368,13 +368,13 @@ class TestLSTM(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.data(
init_h = paddle.fluid.data(
"init_h", [2 * self.num_directions, -1, 32],
dtype=paddle.framework.get_default_dtype())
init_c = paddle.data(
init_c = paddle.fluid.data(
"init_c", [2 * self.num_directions, -1, 32],
dtype=paddle.framework.get_default_dtype())
y, (h, c) = rnn2(x_data, (init_h, init_c))
......@@ -403,7 +403,7 @@ class TestLSTM(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
y, (h, c) = rnn2(x_data)
......@@ -434,10 +434,10 @@ class TestLSTM(unittest.TestCase):
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.data(
x_data = paddle.fluid.data(
"input", [-1, -1, 16],
dtype=paddle.framework.get_default_dtype())
seq_len = paddle.data("seq_len", [-1], dtype="int64")
seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64")
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
......
......@@ -143,7 +143,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [11, 17])
x = paddle.fluid.data('X', [11, 17])
out1 = F.log_sigmoid(x)
m = paddle.nn.LogSigmoid()
out2 = m(x)
......@@ -167,7 +167,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
def test_fluid_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [11, 17])
x = paddle.fluid.data('X', [11, 17])
out = paddle.fluid.layers.logsigmoid(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
......@@ -180,10 +180,10 @@ class TestLogSigmoidAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.log_sigmoid, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
self.assertRaises(TypeError, F.log_sigmoid, x_int32)
# support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
F.log_sigmoid(x_fp16)
......@@ -222,7 +222,7 @@ class TestTanhAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12], self.dtype)
x = paddle.fluid.data('X', [10, 12], self.dtype)
out1 = F.tanh(x)
th = paddle.nn.Tanh()
out2 = th(x)
......@@ -260,10 +260,10 @@ class TestTanhAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.tanh, 1)
# The input dtype must be float16, float32.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.tanh, x_int32)
# support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.tanh(x_fp16)
......@@ -482,7 +482,7 @@ class TestTanhshrinkAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.tanhshrink(x)
tanhshrink = paddle.nn.Tanhshrink()
out2 = tanhshrink(x)
......@@ -519,10 +519,10 @@ class TestTanhshrinkAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.tanhshrink, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.tanhshrink, x_int32)
# support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.tanhshrink(x_fp16)
......@@ -572,7 +572,7 @@ class TestHardShrinkAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12])
x = paddle.fluid.data('X', [10, 12])
out1 = F.hardshrink(x)
hd = paddle.nn.Hardshrink()
out2 = hd(x)
......@@ -616,10 +616,10 @@ class TestHardShrinkAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardshrink, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.hardshrink, x_int32)
# support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.hardshrink(x_fp16)
......@@ -642,7 +642,7 @@ class TestHardtanhAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12])
x = paddle.fluid.data('X', [10, 12])
out1 = F.hardtanh(x)
m = paddle.nn.Hardtanh()
out2 = m(x)
......@@ -676,10 +676,10 @@ class TestHardtanhAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardtanh, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.hardtanh, x_int32)
# support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.hardtanh(x_fp16)
......@@ -722,7 +722,7 @@ class TestSoftshrinkAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.softshrink(x, self.threshold)
softshrink = paddle.nn.Softshrink(self.threshold)
out2 = softshrink(x)
......@@ -759,13 +759,13 @@ class TestSoftshrinkAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.softshrink, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.softshrink, x_int32)
# The threshold must be no less than zero
x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32')
x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.softshrink(x_fp16)
......@@ -983,7 +983,7 @@ class TestReluAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12])
x = paddle.fluid.data('X', [10, 12])
out1 = F.relu(x)
m = paddle.nn.ReLU()
out2 = m(x)
......@@ -1010,10 +1010,10 @@ class TestReluAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.relu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[10, 12], dtype='int32')
self.assertRaises(TypeError, F.relu, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[10, 12], dtype='float16')
F.relu(x_fp16)
......@@ -1075,7 +1075,7 @@ class TestLeakyReluAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12])
x = paddle.fluid.data('X', [10, 12])
out1 = F.leaky_relu(x)
m = paddle.nn.LeakyReLU()
out2 = m(x)
......@@ -1119,10 +1119,10 @@ class TestLeakyReluAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.leaky_relu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.leaky_relu, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.leaky_relu(x_fp16)
......@@ -1184,7 +1184,7 @@ class TestGELUAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [11, 17])
x = paddle.fluid.data('X', [11, 17])
out1 = F.gelu(x)
m = paddle.nn.GELU()
out2 = m(x)
......@@ -1218,10 +1218,10 @@ class TestGELUAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.gelu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32')
self.assertRaises(TypeError, F.gelu, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16')
F.gelu(x_fp16)
......@@ -1331,7 +1331,7 @@ class TestRelu6API(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.relu6(x)
relu6 = paddle.nn.ReLU6()
out2 = relu6(x)
......@@ -1368,10 +1368,10 @@ class TestRelu6API(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.relu6, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.relu6, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.relu6(x_fp16)
......@@ -1414,7 +1414,7 @@ class TestHardswishAPI(unittest.TestCase):
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.hardswish(x)
m = paddle.nn.Hardswish()
out2 = m(x)
......@@ -1455,10 +1455,10 @@ class TestHardswishAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardswish, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.hardswish, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.hardswish(x_fp16)
......@@ -1538,7 +1538,7 @@ class TestELUAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12])
x = paddle.fluid.data('X', [10, 12])
out1 = F.elu(x)
m = paddle.nn.ELU()
out2 = m(x)
......@@ -1572,10 +1572,10 @@ class TestELUAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.elu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[10, 12], dtype='int32')
self.assertRaises(TypeError, F.elu, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[10, 12], dtype='float16')
F.elu(x_fp16)
......@@ -1858,7 +1858,7 @@ class TestSoftplusAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.softplus(x, self.beta, self.threshold)
softplus = paddle.nn.Softplus(self.beta, self.threshold)
out2 = softplus(x)
......@@ -1895,10 +1895,10 @@ class TestSoftplusAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.softplus, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.softplus, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.softplus(x_fp16)
......@@ -1935,7 +1935,7 @@ class TestSoftsignAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.softsign(x)
softsign = paddle.nn.Softsign()
out2 = softsign(x)
......@@ -1972,10 +1972,10 @@ class TestSoftsignAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.softsign, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.softsign, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.softsign(x_fp16)
......@@ -2018,7 +2018,7 @@ class TestThresholdedReluAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.thresholded_relu(x, self.threshold)
thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
out2 = thresholded_relu(x)
......@@ -2055,10 +2055,10 @@ class TestThresholdedReluAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.thresholded_relu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.thresholded_relu, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.thresholded_relu(x_fp16)
......@@ -2113,7 +2113,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.hardsigmoid(x)
m = paddle.nn.Hardsigmoid()
out2 = m(x)
......@@ -2154,10 +2154,10 @@ class TestHardsigmoidAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardsigmoid, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.hardsigmoid, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.hardsigmoid(x_fp16)
......@@ -2195,7 +2195,7 @@ class TestSwishAPI(unittest.TestCase):
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.swish(x)
swish = paddle.nn.Swish()
out2 = swish(x)
......@@ -2232,10 +2232,10 @@ class TestSwishAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.swish, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.swish, x_int32)
# the float16 input dtype is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.swish(x_fp16)
......
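Editor's note: the activation hunks above all apply the same mechanical rewrite — feed variables declared with the removed alias `paddle.data` now go through `paddle.fluid.data`. A minimal sketch of the resulting pattern, not part of this commit and assuming a Paddle build from around this change (variable names are illustrative):

import paddle
import paddle.nn.functional as F

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    # fluid.data is the surviving entry point for static feed variables
    x = paddle.fluid.data(name='x', shape=[10, 12], dtype='float32')
    out = F.relu(x)  # any activation touched above follows the same pattern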
......@@ -499,7 +499,7 @@ class TestAdamOpV2(unittest.TestCase):
cur_lr = adam.get_lr()
assert (lr == cur_lr)
with self.assertRaises(TypeError):
lr_var = paddle.create_global_var(
lr_var = paddle.fluid.layers.create_global_var(
shape=[1], value=lr, dtype='float32')
adam.set_lr(lr_var)
......
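Editor's note: here the removed alias `paddle.create_global_var` is requalified. A hedged sketch of the surviving call, with an illustrative learning-rate value:

import paddle

paddle.enable_static()
# create_global_var keeps its fluid.layers signature:
# (shape, value, dtype, persistable=False, force_cpu=False, name=None)
lr_var = paddle.fluid.layers.create_global_var(
    shape=[1], value=0.01, dtype='float32', persistable=True)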
......@@ -110,7 +110,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_avg_pool2d(
x=x, output_size=[3, 3])
......@@ -205,7 +205,7 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3])
out_1 = adaptive_avg_pool(x=x)
......
......@@ -125,7 +125,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_avg_pool3d(
x=x, output_size=[3, 3, 3])
......@@ -220,7 +220,7 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(
output_size=[3, 3, 3])
......
......@@ -110,7 +110,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_max_pool2d(
x=x, output_size=[3, 3])
......@@ -200,7 +200,7 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32")
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3])
out_1 = adaptive_max_pool(x=x)
......
......@@ -125,7 +125,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
out_1 = paddle.nn.functional.adaptive_max_pool3d(
x=x, output_size=[3, 3, 3])
......@@ -215,7 +215,7 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32")
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
output_size=[3, 3, 3])
......
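Editor's note: all four adaptive-pooling suites swap the same declaration. A minimal end-to-end sketch of the static path, assuming a build from around this commit; the shapes mirror the tests above:

import numpy as np
import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.fluid.data(name='x', shape=[2, 3, 7, 7], dtype='float32')
    out = paddle.nn.functional.adaptive_avg_pool2d(x, output_size=[3, 3])
exe = paddle.static.Executor(paddle.CPUPlace())
res, = exe.run(main,
               feed={'x': np.random.rand(2, 3, 7, 7).astype('float32')},
               fetch_list=[out])
# res.shape == (2, 3, 3, 3)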
......@@ -18,7 +18,6 @@ import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
import paddle
import paddle.nn.functional as F
from paddle.fluid import Program, program_guard
......@@ -157,7 +156,7 @@ class TestAddPositionEncodingOpDygraph(unittest.TestCase):
def test_dygraph(self):
paddle.disable_static()
tensor = np.random.randn(16, 32, 64)
position_tensor = F.add_position_encoding(
position_tensor = paddle.fluid.layers.add_position_encoding(
input=paddle.to_tensor(tensor), alpha=1.0, beta=1.0).numpy()
paddle.enable_static()
......
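Editor's note: the functional alias `F.add_position_encoding` is dropped in favour of the fluid layer. A hedged dygraph sketch mirroring the test above:

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.random.randn(16, 32, 64).astype('float32'))
# only the fluid.layers spelling remains after this commit
out = paddle.fluid.layers.add_position_encoding(input=x, alpha=1.0, beta=1.0)
paddle.enable_static()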
......@@ -37,7 +37,7 @@ class TestAddcmulLayer(unittest.TestCase):
tensor1 = fluid.data(name="tensor1", dtype=self._dtype, shape=[100])
tensor2 = fluid.data(
name="tensor2", dtype=self._dtype, shape=[3, 100])
out = paddle.addcmul(input, tensor1, tensor2, value)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value)
exe = fluid.Executor(self._place)
return exe.run(feed={
......@@ -53,7 +53,7 @@ class TestAddcmulLayer(unittest.TestCase):
input = fluid.dygraph.to_variable(self.input)
tensor1 = fluid.dygraph.to_variable(self.tensor1)
tensor2 = fluid.dygraph.to_variable(self.tensor2)
out = paddle.addcmul(input, tensor1, tensor2, value)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value)
return out.numpy()
def numpy(self, value=1.0):
......@@ -85,7 +85,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32')
tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast0(self):
......@@ -95,7 +95,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast1(self):
......@@ -105,7 +105,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[4, 100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast2(self):
......@@ -115,7 +115,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape)
......@@ -129,7 +129,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_input)
......@@ -141,7 +141,7 @@ class InvalidInputTest(unittest.TestCase):
tensor1 = [20, 20]
tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_tensor1)
......@@ -153,7 +153,7 @@ class InvalidInputTest(unittest.TestCase):
tensor1 = fluid.data(
name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = [20, 20]
out = paddle.addcmul(input, tensor1, tensor2)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_tensor2)
......@@ -166,7 +166,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2, value=1)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=1)
self.assertRaises(TypeError, test_invalid_value_int)
......@@ -178,7 +178,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='int32')
tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='int32')
out = paddle.addcmul(input, tensor1, tensor2, value=1.0)
out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=1.0)
self.assertRaises(TypeError, test_invalid_value_float)
......
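Editor's note: every `paddle.addcmul` call is requalified to `paddle.tensor.math.addcmul`, which computes input + value * tensor1 * tensor2. A minimal static sketch under that assumption, with illustrative shapes:

import paddle
import paddle.fluid as fluid

paddle.enable_static()
with fluid.program_guard(fluid.Program(), fluid.Program()):
    inp = fluid.data(name='inp', shape=[3, 100], dtype='float32')
    t1 = fluid.data(name='t1', shape=[3, 100], dtype='float32')
    t2 = fluid.data(name='t2', shape=[3, 100], dtype='float32')
    # out = inp + 2.0 * t1 * t2
    out = paddle.tensor.math.addcmul(inp, t1, t2, value=2.0)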
......@@ -95,8 +95,8 @@ class TestAllcloseError(unittest.TestCase):
def test_x_dtype():
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data(name='x', shape=[10, 10], dtype='float16')
y = paddle.data(name='y', shape=[10, 10], dtype='float64')
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16')
y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
result = paddle.allclose(x, y)
self.assertRaises(TypeError, test_x_dtype)
......@@ -104,15 +104,15 @@ class TestAllcloseError(unittest.TestCase):
def test_y_dtype():
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.data(name='y', shape=[10, 10], dtype='int32')
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32')
result = paddle.allclose(x, y)
self.assertRaises(TypeError, test_y_dtype)
def test_attr(self):
x = paddle.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.data(name='y', shape=[10, 10], dtype='float64')
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
def test_rtol():
result = paddle.allclose(x, y, rtol=True)
......
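Editor's note: the dtype checks above route their feeds through `paddle.fluid.data`; the happy path of `paddle.allclose` is unchanged. A dygraph sketch with illustrative values (rtol/atol must be floats, which is what the rtol=True case asserts):

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.array([10000.0, 1e-07]))
y = paddle.to_tensor(np.array([10000.1, 1e-08]))
res = paddle.allclose(x, y, rtol=1e-05, atol=1e-08)
# the second pair falls outside tolerance, so res evaluates to False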
......@@ -27,10 +27,10 @@ def test_static_layer(place,
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=input_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64')
input = paddle.fluid.data(name='input', shape=input_np.shape, dtype='float64')
label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
if weight_np is not None:
weight = paddle.data(
weight = paddle.fluid.data(
name='weight', shape=weight_np.shape, dtype='float64')
bce_loss = paddle.nn.loss.BCELoss(
weight=weight, reduction=reduction)
......@@ -58,10 +58,10 @@ def test_static_functional(place,
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=input_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64')
input = paddle.fluid.data(name='input', shape=input_np.shape, dtype='float64')
label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
if weight_np is not None:
weight = paddle.data(
weight = paddle.fluid.data(
name='weight', shape=weight_np.shape, dtype='float64')
res = paddle.nn.functional.binary_cross_entropy(
input, label, weight=weight, reduction=reduction)
......
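Editor's note: both BCE helpers now declare their feeds via `paddle.fluid.data`. A condensed sketch of the functional static path, assuming a build from around this commit:

import numpy as np
import paddle

paddle.enable_static()
prog = paddle.static.Program()
with paddle.static.program_guard(prog):
    inp = paddle.fluid.data(name='input', shape=[3, 2], dtype='float64')
    lab = paddle.fluid.data(name='label', shape=[3, 2], dtype='float64')
    loss = paddle.nn.functional.binary_cross_entropy(inp, lab, reduction='mean')
exe = paddle.static.Executor(paddle.CPUPlace())
res, = exe.run(prog,
               feed={'input': np.random.uniform(0.1, 0.9, [3, 2]),
                     'label': np.random.randint(0, 2, [3, 2]).astype('float64')},
               fetch_list=[loss])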
......@@ -48,18 +48,18 @@ def test_static(place,
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
logit = paddle.data(name='logit', shape=logit_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64')
logit = paddle.fluid.data(name='logit', shape=logit_np.shape, dtype='float64')
label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
feed_dict = {"logit": logit_np, "label": label_np}
pos_weight = None
weight = None
if pos_weight_np is not None:
pos_weight = paddle.data(
pos_weight = paddle.fluid.data(
name='pos_weight', shape=pos_weight_np.shape, dtype='float64')
feed_dict["pos_weight"] = pos_weight_np
if weight_np is not None:
weight = paddle.data(
weight = paddle.fluid.data(
name='weight', shape=weight_np.shape, dtype='float64')
feed_dict["weight"] = weight_np
if functional:
......
......@@ -27,28 +27,28 @@ class TestChunkOpError(unittest.TestCase):
with program_guard(Program(), Program()):
# The type of axis in chunk_op should be int or Variable.
def test_axis_type():
x1 = paddle.data(shape=[4], dtype='float16', name='x3')
x1 = paddle.fluid.data(shape=[4], dtype='float16', name='x3')
paddle.chunk(x=x1, chunks=2, axis=3.2)
self.assertRaises(TypeError, test_axis_type)
# The type of axis in chunk op should be int or Variable.
def test_axis_variable_type():
x2 = paddle.data(shape=[4], dtype='float16', name='x9')
x3 = paddle.data(shape=[1], dtype='float16', name='x10')
x2 = paddle.fluid.data(shape=[4], dtype='float16', name='x9')
x3 = paddle.fluid.data(shape=[1], dtype='float16', name='x10')
paddle.chunk(input=x2, chunks=2, axis=x3)
self.assertRaises(TypeError, test_axis_variable_type)
# The type of num_or_sections in chunk_op should be int, tuple or list.
def test_chunks_type():
x4 = paddle.data(shape=[4], dtype='float16', name='x4')
x4 = paddle.fluid.data(shape=[4], dtype='float16', name='x4')
paddle.chunk(input=x4, chunks=2.1, axis=3)
self.assertRaises(TypeError, test_chunks_type)
def test_axis_type_tensor():
x5 = paddle.data(shape=[4], dtype='float16', name='x6')
x5 = paddle.fluid.data(shape=[4], dtype='float16', name='x6')
paddle.chunk(input=x5, chunks=2, axis=3.2)
self.assertRaises(TypeError, test_axis_type_tensor)
......@@ -57,8 +57,8 @@ class TestChunkOpError(unittest.TestCase):
class API_TestChunk(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64')
data2 = paddle.data('data2', shape=[1], dtype='int32')
data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64')
data2 = paddle.fluid.data('data2', shape=[1], dtype='int32')
x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=data2)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
......@@ -76,7 +76,7 @@ class API_TestChunk(unittest.TestCase):
class API_TestChunk1(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64')
data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64')
x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=2)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
......
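Editor's note: a minimal runnable sketch of the `paddle.chunk` case exercised above, assuming the same build; the axis may be an int or, as in API_TestChunk, a 1-element int tensor:

import numpy as np
import paddle

paddle.enable_static()
prog = paddle.static.Program()
with paddle.static.program_guard(prog):
    data = paddle.fluid.data(name='data', shape=[4, 6, 6], dtype='float64')
    x0, x1, x2 = paddle.chunk(data, chunks=3, axis=2)  # three [4, 6, 2] pieces
exe = paddle.static.Executor(paddle.CPUPlace())
r0, r1, r2 = exe.run(prog,
                     feed={'data': np.random.rand(4, 6, 6)},
                     fetch_list=[x0, x1, x2])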
......@@ -253,16 +253,16 @@ class TestConcatAPI(unittest.TestCase):
assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
def test_api(self):
x_1 = paddle.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
x_1 = paddle.fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
paddle.concat([x_1, x_1], 0)
input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
positive_1_int32 = paddle.fill_constant([1], "int32", 1)
positive_1_int64 = paddle.fill_constant([1], "int64", 1)
negative_int64 = paddle.fill_constant([1], "int64", -3)
positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1)
positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1)
negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3)
out_1 = paddle.concat(x=[x_2, x_3], axis=1)
out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
......@@ -305,8 +305,8 @@ class TestConcatAPI(unittest.TestCase):
np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, paddle.concat, [x2])
# The input dtype of concat_op must be float16, float32, float64, int32, int64.
x4 = paddle.data(shape=[4], dtype='uint8', name='x4')
x5 = paddle.data(shape=[4], dtype='uint8', name='x5')
x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4')
x5 = paddle.fluid.data(shape=[4], dtype='uint8', name='x5')
self.assertRaises(TypeError, fluid.layers.concat, [x4, x5])
# The type of axis in concat_op should be int or Variable.
......
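Editor's note: the concat test keeps passing the axis as a filled tensor; only the `fill_constant` spelling changes. A hedged sketch of that pattern:

import paddle
import paddle.fluid as fluid

paddle.enable_static()
with fluid.program_guard(fluid.Program()):
    x2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x2')
    x3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x3')
    # a 1-element integer tensor is accepted wherever an int axis is
    axis = paddle.fluid.layers.fill_constant([1], 'int32', 1)
    out = paddle.concat(x=[x2, x3], axis=axis)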
......@@ -48,8 +48,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
np_x1 = np.random.rand(*shape).astype(np.float32)
np_x2 = np.random.rand(*shape).astype(np.float32)
x1 = paddle.data(name="x1", shape=shape)
x2 = paddle.data(name="x2", shape=shape)
x1 = paddle.fluid.data(name="x1", shape=shape)
x2 = paddle.fluid.data(name="x2", shape=shape)
result = F.cosine_similarity(x1, x2, axis=axis, eps=eps)
exe = Executor(place)
fetches = exe.run(default_main_program(),
......
......@@ -172,11 +172,11 @@ class TestDiagV2API(unittest.TestCase):
self.assertTrue(np.allclose(y.numpy(), self.expected11))
def run_static(self, use_gpu=False):
x = paddle.data(name='input', shape=[10, 10], dtype='float32')
x2 = paddle.data(name='input2', shape=[100], dtype='float64')
x3 = paddle.data(name='input3', shape=[100], dtype='int64')
x4 = paddle.data(name='input4', shape=[2000, 2000], dtype='float32')
x5 = paddle.data(name='input5', shape=[2000], dtype='float32')
x = paddle.fluid.data(name='input', shape=[10, 10], dtype='float32')
x2 = paddle.fluid.data(name='input2', shape=[100], dtype='float64')
x3 = paddle.fluid.data(name='input3', shape=[100], dtype='int64')
x4 = paddle.fluid.data(name='input4', shape=[2000, 2000], dtype='float32')
x5 = paddle.fluid.data(name='input5', shape=[2000], dtype='float32')
result0 = paddle.diag(x)
result1 = paddle.diag(x, offset=1)
result2 = paddle.diag(x, offset=-1)
......
......@@ -37,8 +37,7 @@ class TestDirectory(unittest.TestCase):
new_directory = [
'paddle.enable_static', 'paddle.disable_static',
'paddle.in_dynamic_mode', 'paddle.to_tensor', 'paddle.grad',
'paddle.no_grad', 'paddle.save', 'paddle.load',
'paddle.static.save', 'paddle.static.load',
'paddle.no_grad', 'paddle.static.save', 'paddle.static.load',
'paddle.distributed.ParallelEnv',
'paddle.distributed.prepare_context', 'paddle.DataParallel',
'paddle.jit', 'paddle.jit.TracedLayer', 'paddle.jit.to_static',
......
......@@ -170,7 +170,7 @@ class TestFlatten2OpError(unittest.TestCase):
x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
image_shape[3]).reshape(image_shape) / 100.
x2 = x2.astype('float16')
x2_var = paddle.data(name='x2', shape=[3, 2, 4, 5], dtype='float16')
x2_var = paddle.fluid.data(name='x2', shape=[3, 2, 4, 5], dtype='float16')
paddle.flatten(x2_var)
self.assertRaises(TypeError, test_type)
......
......@@ -31,7 +31,7 @@ class TestFullOp(unittest.TestCase):
train_program = Program()
with program_guard(train_program, startup_program):
fill_value = 2.0
input = paddle.data(name='input', dtype='float32', shape=[2, 3])
input = paddle.fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.full_like(input, fill_value)
output_dtype = paddle.full_like(input, fill_value, dtype='float32')
......@@ -67,7 +67,7 @@ class TestFullOpError(unittest.TestCase):
with program_guard(Program(), Program()):
# for CI coverage
input_data = paddle.data(
input_data = paddle.fluid.data(
name='input', dtype='float32', shape=[2, 3])
output = paddle.full_like(input_data, 2.0)
......
......@@ -192,9 +192,9 @@ class TestGatherNdError(unittest.TestCase):
paddle.static.Program()):
shape = [8, 9, 6]
x = paddle.data(shape=shape, dtype='float32', name='x')
index = paddle.data(shape=shape, dtype='bool', name='index')
index_float = paddle.data(
x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
index = paddle.fluid.data(shape=shape, dtype='bool', name='index')
index_float = paddle.fluid.data(
shape=shape, dtype='float32', name='index_float')
np_x = np.random.random(shape).astype('float32')
np_index = np.array(np.random.randint(2, size=shape, dtype=bool))
......
......@@ -202,9 +202,9 @@ class API_TestGather(unittest.TestCase):
def test_out2(self):
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data('x', shape=[-1, 2], dtype='float64')
index = paddle.data('index', shape=[-1, 1], dtype='int32')
axis = paddle.data('axis', shape=[1], dtype='int32')
x = paddle.fluid.data('x', shape=[-1, 2], dtype='float64')
index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32')
axis = paddle.fluid.data('axis', shape=[1], dtype='int32')
out = paddle.gather(x, index, axis)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
......@@ -252,10 +252,10 @@ class TestGathertError(unittest.TestCase):
paddle.static.Program()):
shape = [8, 9, 6]
x = paddle.data(shape=shape, dtype='int8', name='x')
axis = paddle.data(shape=[1], dtype='float32', name='axis')
index = paddle.data(shape=shape, dtype='int32', name='index')
index_float = paddle.data(
x = paddle.fluid.data(shape=shape, dtype='int8', name='x')
axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis')
index = paddle.fluid.data(shape=shape, dtype='int32', name='index')
index_float = paddle.fluid.data(
shape=shape, dtype='float32', name='index_float')
def test_x_type():
......
......@@ -73,7 +73,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test bins should be greater than or equal to 1."""
def net_func():
input_value = paddle.fill_constant(
input_value = paddle.fluid.layers.fill_constant(
shape=[3, 4], dtype='float32', value=3.0)
paddle.histogram(input=input_value, bins=-1, min=1, max=5)
......@@ -84,7 +84,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test max must be larger or equal to min."""
def net_func():
input_value = paddle.fill_constant(
input_value = paddle.fluid.layers.fill_constant(
shape=[3, 4], dtype='float32', value=3.0)
paddle.histogram(input=input_value, bins=1, min=5, max=1)
......@@ -95,7 +95,7 @@ class TestHistogramOpError(unittest.TestCase):
"""Test range of min, max is not finite"""
def net_func():
input_value = paddle.fill_constant(
input_value = paddle.fluid.layers.fill_constant(
shape=[3, 4], dtype='float32', value=3.0)
paddle.histogram(input=input_value, bins=1, min=-np.inf, max=5)
......
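Editor's note: all three failure cases build their input with the requalified `fill_constant`. The corresponding valid call, sketched under the same assumptions:

import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    vals = paddle.fluid.layers.fill_constant(
        shape=[3, 4], dtype='float32', value=3.0)
    # requires bins >= 1 and max >= min, which is what the tests above violate
    hist = paddle.histogram(input=vals, bins=5, min=1, max=5)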
......@@ -31,11 +31,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
nn.Conv2d(
1, 6, 3, stride=1, padding=1),
nn.ReLU(),
nn.Pool2D(2, 'max', 2),
paddle.fluid.dygraph.Pool2D(2, 'max', 2),
nn.Conv2d(
6, 16, 5, stride=1, padding=0),
nn.ReLU(),
nn.Pool2D(2, 'max', 2))
paddle.fluid.dygraph.Pool2D(2, 'max', 2))
if num_classes > 0:
self.fc = nn.Sequential(
......@@ -54,17 +54,17 @@ class LeNetDygraph(fluid.dygraph.Layer):
def init_weights(layer):
if type(layer) == nn.Linear:
new_weight = paddle.fill_constant(
new_weight = paddle.fluid.layers.fill_constant(
layer.weight.shape, layer.weight.dtype, value=0.9)
layer.weight.set_value(new_weight)
new_bias = paddle.fill_constant(
new_bias = paddle.fluid.layers.fill_constant(
layer.bias.shape, layer.bias.dtype, value=-0.1)
layer.bias.set_value(new_bias)
elif type(layer) == nn.Conv2d:
new_weight = paddle.fill_constant(
new_weight = paddle.fluid.layers.fill_constant(
layer.weight.shape, layer.weight.dtype, value=0.7)
layer.weight.set_value(new_weight)
new_bias = paddle.fill_constant(
new_bias = paddle.fluid.layers.fill_constant(
layer.bias.shape, layer.bias.dtype, value=-0.2)
layer.bias.set_value(new_bias)
......
......@@ -30,11 +30,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
nn.Conv2d(
1, 6, 3, stride=1, padding=1),
nn.ReLU(),
nn.Pool2D(2, 'max', 2),
paddle.fluid.dygraph.Pool2D(2, 'max', 2),
nn.Conv2d(
6, 16, 5, stride=1, padding=0),
nn.ReLU(),
nn.Pool2D(2, 'max', 2))
paddle.fluid.dygraph.Pool2D(2, 'max', 2))
def forward(self, inputs):
x = self.features(inputs)
......
......@@ -135,10 +135,10 @@ class BadInputTest(unittest.TestCase):
with fluid.dygraph.guard():
data = paddle.zeros([2, 3])
result = paddle.has_inf(data)
result = paddle.fluid.layers.has_inf(data)
expect_value = np.array([False])
self.assertEqual((result.numpy() == expect_value).all(), True)
result = paddle.has_nan(data)
result = paddle.fluid.layers.has_nan(data)
self.assertEqual((result.numpy() == expect_value).all(), True)
......
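Editor's note: `paddle.has_inf` and `paddle.has_nan` are demoted to their fluid spellings. A dygraph sketch mirroring the test:

import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    data = paddle.zeros([2, 3])
    print(paddle.fluid.layers.has_inf(data).numpy())  # [False]
    print(paddle.fluid.layers.has_nan(data).numpy())  # [False]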
......@@ -27,7 +27,7 @@ def run_static(x_np, dtype, op_str, use_gpu=False):
place = paddle.CUDAPlace(0)
exe = fluid.Executor(place)
with fluid.program_guard(main_program, startup_program):
x = paddle.data(name='x', shape=x_np.shape, dtype=dtype)
x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=dtype)
res = getattr(paddle.tensor, op_str)(x)
exe.run(startup_program)
static_result = exe.run(main_program,
......
......@@ -44,8 +44,8 @@ class TestFunctionalL1Loss(unittest.TestCase):
self.assertEqual(dy_result.shape, [10, 10, 5])
def run_static(self, use_gpu=False):
input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
label = paddle.data(name='label', shape=[10, 10, 5], dtype='float32')
input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
label = paddle.fluid.data(name='label', shape=[10, 10, 5], dtype='float32')
result0 = paddle.nn.functional.l1_loss(input, label)
result1 = paddle.nn.functional.l1_loss(input, label, reduction='sum')
result2 = paddle.nn.functional.l1_loss(input, label, reduction='none')
......@@ -90,9 +90,9 @@ class TestFunctionalL1Loss(unittest.TestCase):
# test cases for the raised error messages
def test_errors(self):
def test_value_error():
input = paddle.data(
input = paddle.fluid.data(
name='input', shape=[10, 10, 5], dtype='float32')
label = paddle.data(
label = paddle.fluid.data(
name='label', shape=[10, 10, 5], dtype='float32')
loss = paddle.nn.functional.l1_loss(
input, label, reduction='reduce_mean')
......@@ -127,8 +127,8 @@ class TestClassL1Loss(unittest.TestCase):
self.assertEqual(dy_result.shape, [10, 10, 5])
def run_static(self, use_gpu=False):
input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
label = paddle.data(name='label', shape=[10, 10, 5], dtype='float32')
input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
label = paddle.fluid.data(name='label', shape=[10, 10, 5], dtype='float32')
l1_loss = paddle.nn.loss.L1Loss()
result0 = l1_loss(input, label)
l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
......
......@@ -327,7 +327,7 @@ class TestLayer(LayerTest):
with self.dynamic_graph():
t = np.ones([3, 3, 5, 5], dtype='float32')
my_pad2d = paddle.nn.Pad2D(paddings=1)
my_pad2d = paddle.nn.layer.Pad2D(paddings=1)
dy_ret = my_pad2d(base.to_variable(t))
dy_ret_value = dy_ret.numpy()
......
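Editor's note: `paddle.nn.Pad2D` goes away as a top-level alias; the layer is still reachable through `paddle.nn.layer`. A dygraph sketch with the same illustrative input:

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    t = np.ones([3, 3, 5, 5], dtype='float32')
    my_pad2d = paddle.nn.layer.Pad2D(paddings=1)
    out = my_pad2d(fluid.dygraph.to_variable(t))  # zero-padded to [3, 3, 7, 7]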
......@@ -88,7 +88,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
logsoftmax = paddle.nn.LogSoftmax(axis)
# test static api
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data(name='x', shape=self.x_shape)
x = paddle.fluid.data(name='x', shape=self.x_shape)
y = logsoftmax(x)
exe = paddle.static.Executor(self.place)
out = exe.run(feed={'x': self.x}, fetch_list=[y])
......@@ -120,7 +120,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
x = x.astype(dtype)
ref_out = np.apply_along_axis(ref_log_softmax, axis, x)
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data(name='x', shape=self.x_shape)
x = paddle.fluid.data(name='x', shape=self.x_shape)
y = F.log_softmax(x, axis, dtype)
exe = paddle.static.Executor(self.place)
out = exe.run(feed={'x': self.x}, fetch_list=[y])
......@@ -139,10 +139,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data(name='X1', shape=[100], dtype='int32')
x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
self.assertRaises(TypeError, F.log_softmax, x)
x = paddle.data(name='X2', shape=[100], dtype='float32')
x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')
......
......@@ -90,7 +90,7 @@ class TestLogsumexpError(unittest.TestCase):
def test_errors(self):
with paddle.static.program_guard(paddle.static.Program()):
self.assertRaises(TypeError, paddle.logsumexp, 1)
x1 = paddle.data(name='x1', shape=[120], dtype="int32")
x1 = paddle.fluid.data(name='x1', shape=[120], dtype="int32")
self.assertRaises(TypeError, paddle.logsumexp, x1)
......@@ -104,7 +104,7 @@ class TestLogsumexpAPI(unittest.TestCase):
def api_case(self, axis=None, keepdim=False):
out_ref = ref_logsumexp(self.x, axis, keepdim)
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.shape)
x = paddle.fluid.data('X', self.shape)
out = paddle.logsumexp(x, axis, keepdim)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x}, fetch_list=[out])
......
......@@ -414,7 +414,7 @@ class TestLRScheduler(unittest.TestCase):
for batch_id in range(2):
x = paddle.to_tensor(x)
out = linear(x)
loss = paddle.reduce_mean(out)
loss = paddle.fluid.layers.reduce_mean(out)
loss.backward()
adam.step()
adam.clear_grad()
......
......@@ -74,8 +74,8 @@ class TestMaskedSelectAPI(unittest.TestCase):
def test_static_mode(self):
shape = [8, 9, 6]
x = paddle.data(shape=shape, dtype='float32', name='x')
mask = paddle.data(shape=shape, dtype='bool', name='mask')
x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
np_x = np.random.random(shape).astype('float32')
np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
......@@ -97,9 +97,9 @@ class TestMaskedSelectError(unittest.TestCase):
paddle.static.Program()):
shape = [8, 9, 6]
x = paddle.data(shape=shape, dtype='float32', name='x')
mask = paddle.data(shape=shape, dtype='bool', name='mask')
mask_float = paddle.data(
x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
mask_float = paddle.fluid.data(
shape=shape, dtype='float32', name='mask_float')
np_x = np.random.random(shape).astype('float32')
np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
......
......@@ -473,12 +473,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
# 3. Bool tensor operation
x = paddle.to_tensor([[True, False], [True, False]])
y = paddle.to_tensor([[False, False], [False, True]])
self.assertTrue(
np.array_equal(x.reduce_all().numpy(), paddle.reduce_all(x).numpy(
)))
self.assertTrue(
np.array_equal(x.reduce_any().numpy(), paddle.reduce_any(x).numpy(
)))
self.assertTrue(
np.array_equal(
x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
......@@ -501,18 +495,9 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
x.where(a, b).numpy(), paddle.where(x, a, b).numpy()))
self.assertTrue(inspect.ismethod(a.dot))
self.assertTrue(inspect.ismethod(a.elementwise_add))
self.assertTrue(inspect.ismethod(a.elementwise_div))
self.assertTrue(inspect.ismethod(a.elementwise_floordiv))
self.assertTrue(inspect.ismethod(a.elementwise_mod))
self.assertTrue(inspect.ismethod(a.elementwise_sub))
self.assertTrue(inspect.ismethod(a.logsumexp))
self.assertTrue(inspect.ismethod(a.multiplex))
self.assertTrue(inspect.ismethod(a.prod))
self.assertTrue(inspect.ismethod(a.reduce_max))
self.assertTrue(inspect.ismethod(a.reduce_min))
self.assertTrue(inspect.ismethod(a.reduce_prod))
self.assertTrue(inspect.ismethod(a.reduce_sum))
self.assertTrue(inspect.ismethod(a.scale))
self.assertTrue(inspect.ismethod(a.stanh))
self.assertTrue(inspect.ismethod(a.add_n))
......@@ -528,7 +513,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertTrue(inspect.ismethod(a.inverse))
self.assertTrue(inspect.ismethod(a.log1p))
self.assertTrue(inspect.ismethod(a.erf))
self.assertTrue(inspect.ismethod(a.addcmul))
self.assertTrue(inspect.ismethod(a.addmm))
self.assertTrue(inspect.ismethod(a.clip))
self.assertTrue(inspect.ismethod(a.trace))
......@@ -548,8 +532,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertTrue(inspect.ismethod(a.argmax))
self.assertTrue(inspect.ismethod(a.argmin))
self.assertTrue(inspect.ismethod(a.argsort))
self.assertTrue(inspect.ismethod(a.has_inf))
self.assertTrue(inspect.ismethod(a.has_nan))
self.assertTrue(inspect.ismethod(a.masked_select))
self.assertTrue(inspect.ismethod(a.topk))
self.assertTrue(inspect.ismethod(a.index_select))
......@@ -557,7 +539,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertTrue(inspect.ismethod(a.sort))
self.assertTrue(inspect.ismethod(a.index_sample))
self.assertTrue(inspect.ismethod(a.mean))
self.assertTrue(inspect.ismethod(a.reduce_mean))
self.assertTrue(inspect.ismethod(a.std))
self.assertTrue(inspect.ismethod(a.numel))
......
......@@ -92,7 +92,7 @@ class TestMaxoutAPI(unittest.TestCase):
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.maxout(x, self.groups, self.axis)
m = paddle.nn.Maxout(self.groups, self.axis)
out2 = m(x)
......@@ -137,11 +137,11 @@ class TestMaxoutAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.maxout, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(
x_int32 = paddle.fluid.data(
name='x_int32', shape=[2, 4, 6, 8], dtype='int32')
self.assertRaises(TypeError, F.maxout, x_int32)
x_float32 = paddle.data(name='x_float32', shape=[2, 4, 6, 8])
x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
......
......@@ -185,7 +185,7 @@ class TestMeanAPI(unittest.TestCase):
def test_api_static(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_shape)
x = paddle.fluid.data('X', self.x_shape)
out1 = paddle.mean(x)
out2 = paddle.tensor.mean(x)
out3 = paddle.tensor.stat.mean(x)
......@@ -249,7 +249,7 @@ class TestMeanAPI(unittest.TestCase):
self.assertRaises(Exception, paddle.mean, x, 2)
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [10, 12], 'int32')
x = paddle.fluid.data('X', [10, 12], 'int32')
self.assertRaises(TypeError, paddle.mean, x)
......
......@@ -191,8 +191,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=dim, dtype='float32')
target = paddle.data(name='target', shape=dim, dtype='float32')
input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')
exe = paddle.static.Executor(place)
......@@ -225,8 +225,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=dim, dtype='float32')
target = paddle.data(name='target', shape=dim, dtype='float32')
input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')
exe = paddle.static.Executor(place)
......@@ -259,8 +259,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.data(name='input', shape=dim, dtype='float32')
target = paddle.data(name='target', shape=dim, dtype='float32')
input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')
exe = paddle.static.Executor(place)
......
......@@ -884,8 +884,8 @@ class TestNLLLossName(unittest.TestCase):
startup_prog = paddle.static.Program()
place = paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.data(name='label', shape=[10], dtype='int64')
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss')
res = nll_loss(x, label)
self.assertTrue(res.name.startswith('nll_loss'))
......@@ -898,8 +898,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
startup_prog = paddle.static.Program()
place = paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=[10, ], dtype='float64')
label = paddle.data(name='label', shape=[10, ], dtype='float64')
x = paddle.fluid.data(name='x', shape=[10, ], dtype='float64')
label = paddle.fluid.data(name='label', shape=[10, ], dtype='float64')
nll_loss = paddle.nn.loss.NLLLoss()
res = nll_loss(x, label)
......@@ -922,8 +922,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
startup_prog = paddle.static.Program()
place = paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.data(name='label', shape=[10], dtype='int64')
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
nll_loss = paddle.nn.loss.NLLLoss(reduction='')
res = nll_loss(x, label)
......@@ -946,8 +946,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
startup_prog = paddle.static.Program()
place = paddle.CPUPlace()
with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.data(name='label', shape=[10], dtype='int64')
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
res = paddle.nn.functional.nll_loss(x, label, reduction='')
self.assertRaises(ValueError,
......
......@@ -61,8 +61,8 @@ class TestNormalAPI(unittest.TestCase):
if isinstance(self.mean, np.ndarray) \
and isinstance(self.std, np.ndarray):
with paddle.static.program_guard(paddle.static.Program()):
mean = paddle.data('Mean', self.mean.shape, self.mean.dtype)
std = paddle.data('Std', self.std.shape, self.std.dtype)
mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype)
std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
out = paddle.normal(mean, std, self.shape)
exe = paddle.static.Executor(self.place)
......@@ -76,7 +76,7 @@ class TestNormalAPI(unittest.TestCase):
return ret_all
elif isinstance(self.mean, np.ndarray):
with paddle.static.program_guard(paddle.static.Program()):
mean = paddle.data('Mean', self.mean.shape, self.mean.dtype)
mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype)
out = paddle.normal(mean, self.std, self.shape)
exe = paddle.static.Executor(self.place)
......@@ -86,7 +86,7 @@ class TestNormalAPI(unittest.TestCase):
return ret_all
elif isinstance(self.std, np.ndarray):
with paddle.static.program_guard(paddle.static.Program()):
std = paddle.data('Std', self.std.shape, self.std.dtype)
std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
out = paddle.normal(self.mean, std, self.shape)
exe = paddle.static.Executor(self.place)
......@@ -180,17 +180,17 @@ class TestNormalErrors(unittest.TestCase):
std = [1, 2, 3]
self.assertRaises(TypeError, paddle.normal, std=std)
mean = paddle.data('Mean', [100], 'int32')
mean = paddle.fluid.data('Mean', [100], 'int32')
self.assertRaises(TypeError, paddle.normal, mean)
std = paddle.data('Std', [100], 'int32')
std = paddle.fluid.data('Std', [100], 'int32')
self.assertRaises(TypeError, paddle.normal, mean=1.0, std=std)
self.assertRaises(TypeError, paddle.normal, shape=1)
self.assertRaises(TypeError, paddle.normal, shape=[1.0])
shape = paddle.data('Shape', [100], 'float32')
shape = paddle.fluid.data('Shape', [100], 'float32')
self.assertRaises(TypeError, paddle.normal, shape=shape)
......
......@@ -56,8 +56,8 @@ class TestNNFunctionalNormalize(unittest.TestCase):
self.assertRaises(BaseException, F.normalize, x)
def run_static(self, use_gpu=False):
x = paddle.data(name='input', shape=[10, 10], dtype='float32')
x2 = paddle.data(name='input2', shape=[2], dtype='float32')
x = paddle.fluid.data(name='input', shape=[10, 10], dtype='float32')
x2 = paddle.fluid.data(name='input2', shape=[2], dtype='float32')
result0 = F.normalize(x)
result1 = F.normalize(x, p=1.5)
result2 = F.normalize(x, axis=0)
......
......@@ -55,8 +55,8 @@ class TestNumelOoAPI(unittest.TestCase):
with fluid.program_guard(main_program, startup_program):
shape1 = [2, 1, 4, 5]
shape2 = [1, 4, 5]
x_1 = paddle.data(shape=shape1, dtype='int32', name='x_1')
x_2 = paddle.data(shape=shape2, dtype='int32', name='x_2')
x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1')
x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2')
input_1 = np.random.random(shape1).astype("int32")
input_2 = np.random.random(shape2).astype("int32")
out_1 = paddle.numel(x_1)
......
......@@ -25,7 +25,7 @@ from paddle.fluid import core, Program, program_guard
class TestOnesLikeAPIError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
x = paddle.data('x', [3, 4])
x = paddle.fluid.data('x', [3, 4])
self.assertRaises(TypeError, ones_like, x, 'int8')
......@@ -35,7 +35,7 @@ class TestOnesLikeAPI(unittest.TestCase):
startup_program = Program()
train_program = Program()
with program_guard(train_program, startup_program):
x = paddle.data('X', shape)
x = paddle.fluid.data('X', shape)
# 'bool', 'float32', 'float64', 'int32', 'int64'
out1 = ones_like(x)
......
......@@ -165,7 +165,7 @@ class TestPadAPI(unittest.TestCase):
mode = "constant"
value = 100
input_data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape)
x = paddle.fluid.data(name="x", shape=input_shape)
result = F.pad(x=x,
pad=pad,
value=value,
......@@ -186,7 +186,7 @@ class TestPadAPI(unittest.TestCase):
pad = [1, 2, 1, 1, 1, 2]
mode = "reflect"
input_data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape)
x = paddle.fluid.data(name="x", shape=input_shape)
result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
exe = Executor(place)
......@@ -208,7 +208,7 @@ class TestPadAPI(unittest.TestCase):
pad = [1, 2, 1, 1, 3, 4]
mode = "replicate"
input_data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape)
x = paddle.fluid.data(name="x", shape=input_shape)
result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
exe = Executor(place)
......@@ -230,7 +230,7 @@ class TestPadAPI(unittest.TestCase):
pad = [1, 2, 1, 1, 3, 4]
mode = "circular"
input_data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape)
x = paddle.fluid.data(name="x", shape=input_shape)
result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
exe = Executor(place)
......@@ -637,7 +637,7 @@ class TestPad3dOpError(unittest.TestCase):
def test_reflect_1():
input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape)
x = paddle.fluid.data(name="x", shape=input_shape)
y = F.pad(x, pad=[5, 6, 1, 1, 1, 1], value=1, mode='reflect')
place = paddle.CPUPlace()
exe = Executor(place)
......@@ -646,7 +646,7 @@ class TestPad3dOpError(unittest.TestCase):
def test_reflect_2():
input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape)
x = paddle.fluid.data(name="x", shape=input_shape)
y = F.pad(x, pad=[1, 1, 4, 3, 1, 1], value=1, mode='reflect')
place = paddle.CPUPlace()
exe = Executor(place)
......@@ -655,7 +655,7 @@ class TestPad3dOpError(unittest.TestCase):
def test_reflect_3():
input_shape = (1, 2, 3, 4, 5)
data = np.random.rand(*input_shape).astype(np.float32)
x = paddle.data(name="x", shape=input_shape)
x = paddle.fluid.data(name="x", shape=input_shape)
y = F.pad(x, pad=[1, 1, 1, 1, 2, 3], value=1, mode='reflect')
place = paddle.CPUPlace()
exe = Executor(place)
......
......@@ -32,8 +32,8 @@ def test_static(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
) else fluid.CPUPlace()
with paddle.static.program_guard(prog, startup_prog):
x = paddle.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = paddle.data(name='y', shape=y_np.shape, dtype=x_np.dtype)
x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = paddle.fluid.data(name='y', shape=y_np.shape, dtype=x_np.dtype)
dist = paddle.nn.layer.distance.PairwiseDistance(
p=p, epsilon=epsilon, keepdim=keepdim)
distance = dist(x, y)
......
......@@ -97,8 +97,8 @@ class TestPixelShuffleAPI(unittest.TestCase):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x_1 = paddle.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
x_2 = paddle.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
x_1 = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
x_2 = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
out_1 = F.pixel_shuffle(x_1, 3)
out_2 = F.pixel_shuffle(x_2, 3, "NHWC")
......@@ -123,8 +123,8 @@ class TestPixelShuffleAPI(unittest.TestCase):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.enable_static()
x_1 = paddle.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
x_2 = paddle.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
x_1 = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
x_2 = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
# init instance
ps_1 = paddle.nn.PixelShuffle(3)
ps_2 = paddle.nn.PixelShuffle(3, "NHWC")
......
......@@ -49,8 +49,8 @@ class TestFunctionalPReluAPI(unittest.TestCase):
def static_check(self, weight_np):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, 'float32')
weight = paddle.data('Alpha', weight_np.shape, 'float32')
x = paddle.fluid.data('X', self.x_np.shape, 'float32')
weight = paddle.fluid.data('Alpha', weight_np.shape, 'float32')
out = F.prelu(x, weight)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np,
......@@ -78,15 +78,15 @@ class TestFunctionalPReluAPI(unittest.TestCase):
def test_error(self):
with paddle.static.program_guard(paddle.static.Program()):
weight_fp32 = paddle.data(
weight_fp32 = paddle.fluid.data(
name='weight_fp32', shape=[1], dtype='float32')
# The input type must be Variable.
self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[2, 3], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[2, 3], dtype='int32')
self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32)
# the input dtype float16 is supported
x_fp16 = paddle.data(name='x_fp16', shape=[2, 3], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[2, 3], dtype='float16')
F.prelu(x=x_fp16, weight=weight_fp32)
......@@ -100,7 +100,7 @@ class TestNNPReluAPI(unittest.TestCase):
startup_program = paddle.static.Program()
train_program = paddle.static.Program()
with paddle.static.program_guard(train_program, startup_program):
x = paddle.data(name='X', shape=self.x_np.shape, dtype='float32')
x = paddle.fluid.data(name='X', shape=self.x_np.shape, dtype='float32')
m = paddle.nn.PReLU()
out = m(x)
exe = paddle.static.Executor(self.place)
......
......@@ -55,7 +55,7 @@ class TestProdOp(unittest.TestCase):
self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
def run_static(self, use_gpu=False):
input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
result0 = paddle.prod(input)
result1 = paddle.prod(input, axis=1)
result2 = paddle.prod(input, axis=-1)
......@@ -113,8 +113,8 @@ class TestProdOpError(unittest.TestCase):
def test_error(self):
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data(name='x', shape=[2, 2, 4], dtype='float32')
bool_x = paddle.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32')
bool_x = paddle.fluid.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
# The argument x should be a Tensor
self.assertRaises(TypeError, paddle.prod, [1])
......
......@@ -125,8 +125,8 @@ class TestRandintAPI(unittest.TestCase):
out3 = paddle.randint(
low=-100, high=100, shape=(32, 32, 3), dtype='int64')
# shape is a tensor list and dtype is 'int32'
dim_1 = paddle.fill_constant([1], "int64", 32)
dim_2 = paddle.fill_constant([1], "int32", 50)
dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32)
dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
out4 = paddle.randint(
low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32')
# shape is a tensor and dtype is 'float64'
......
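As above, the removed paddle.fill_constant alias maps onto paddle.fluid.layers.fill_constant, whose positional signature is (shape, dtype, value); the resulting 1-D tensors can still be mixed with Python ints inside a shape list. A short sketch under that assumption:

import paddle

paddle.enable_static()
# each fill_constant builds a 1-D tensor holding one dimension size
dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32)
dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
out = paddle.randint(low=-100, high=100, shape=[dim_1, 5, dim_2], dtype="int32")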
......@@ -30,8 +30,8 @@ class TestRandnOp(unittest.TestCase):
x1 = paddle.randn(shape, 'float32')
x2 = paddle.randn(shape, 'float64')
dim_1 = paddle.fill_constant([1], "int64", 20)
dim_2 = paddle.fill_constant([1], "int32", 50)
dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
x3 = paddle.randn([dim_1, dim_2, 784])
var_shape = paddle.static.data('X', [2], 'int32')
......@@ -59,8 +59,8 @@ class TestRandnOpForDygraph(unittest.TestCase):
x1 = paddle.randn(shape, 'float32')
x2 = paddle.randn(shape, 'float64')
dim_1 = paddle.fill_constant([1], "int64", 20)
dim_2 = paddle.fill_constant([1], "int32", 50)
dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
x3 = paddle.randn(shape=[dim_1, dim_2, 784])
var_shape = paddle.to_tensor(np.array(shape))
......
......@@ -229,8 +229,8 @@ class TestReshapeUint8Op(TestReshapeInt8Op):
# Test python API
class TestReshapeAPI(unittest.TestCase):
def _set_paddle_api(self):
self.fill_constant = paddle.fill_constant
self.data = paddle.data
self.fill_constant = paddle.fluid.layers.fill_constant
self.data = paddle.fluid.data
self.reshape = paddle.reshape
self.to_tensor = paddle.to_tensor
......@@ -305,7 +305,7 @@ class TestReshapeAPI(unittest.TestCase):
# Test Input Error
class TestReshapeOpError(unittest.TestCase):
def _set_paddle_api(self):
self.data = paddle.data
self.data = paddle.fluid.data
self.reshape = paddle.reshape
def _set_fluid_api(self):
......
......@@ -73,7 +73,7 @@ class TestRetainGraph(unittest.TestCase):
fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
disc_interpolates = netD(fake_AB)
outs = paddle.fill_constant(disc_interpolates.shape,
outs = paddle.fluid.layers.fill_constant(disc_interpolates.shape,
disc_interpolates.dtype, 1.0)
gradients = paddle.grad(
outputs=disc_interpolates,
......@@ -85,7 +85,7 @@ class TestRetainGraph(unittest.TestCase):
gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1])
gradient_penalty = paddle.reduce_mean((paddle.norm(
gradient_penalty = paddle.fluid.layers.reduce_mean((paddle.norm(
gradients + 1e-16, 2, 1) - constant)**
2) * lambda_gp # added eps
return gradient_penalty, gradients
......@@ -113,7 +113,7 @@ class TestRetainGraph(unittest.TestCase):
fake_AB = paddle.concat((realA, fakeB), 1)
G_pred_fake = d(fake_AB.detach())
false_target = paddle.fill_constant(G_pred_fake.shape, 'float32', 0.0)
false_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, 'float32', 0.0)
G_gradient_penalty, _ = self.cal_gradient_penalty(
d, realA, fakeB, lambda_gp=10.0)
......@@ -125,7 +125,7 @@ class TestRetainGraph(unittest.TestCase):
optim_g.clear_gradients()
fake_AB = paddle.concat((realA, fakeB), 1)
G_pred_fake = d(fake_AB)
true_target = paddle.fill_constant(G_pred_fake.shape, 'float32', 1.0)
true_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, 'float32', 1.0)
loss_g = l1_criterion(fakeB, realB) + gan_criterion(G_pred_fake,
true_target)
......
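The retain-graph test now spells out the fluid paths for fill_constant and reduce_mean; the WGAN-GP penalty it computes boils down to the expression below (a dygraph sketch with a stand-in gradient tensor; in the test, paddle.grad supplies the real one):

import paddle

paddle.disable_static()
lambda_gp = 10.0
gradients = paddle.randn([4, 8])             # stand-in for the paddle.grad output
norm = paddle.norm(gradients + 1e-16, 2, 1)  # per-sample L2 norm, eps for stability
penalty = paddle.fluid.layers.reduce_mean((norm - 1.0) ** 2) * lambda_gp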
......@@ -69,7 +69,7 @@ class RowConvTestCase(unittest.TestCase):
x = fluid.data(
"input", [-1, -1, self.num_channels], dtype=self.dtype)
w = fluid.data("weight", self.weight_shape, dtype=self.dtype)
y = F.row_conv(x, w, act=self.act)
y = F.extension.row_conv(x, w, act=self.act)
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main,
......@@ -82,7 +82,7 @@ class RowConvTestCase(unittest.TestCase):
with dg.guard(place):
x_var = dg.to_variable(self.input)
w_var = dg.to_variable(self.weight)
y_var = F.row_conv(x_var, w_var, act=self.act)
y_var = F.extension.row_conv(x_var, w_var, act=self.act)
y_np = y_var.numpy()
return y_np
......
......@@ -93,7 +93,7 @@ class TestSeluAPI(unittest.TestCase):
def test_static_api(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.selu(x, self.scale, self.alpha)
selu = paddle.nn.SELU(self.scale, self.alpha)
out2 = selu(x)
......@@ -128,15 +128,15 @@ class TestSeluAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.selu, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.selu, x_int32)
# The scale must be greater than 1.0
x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32')
x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
self.assertRaises(ValueError, F.selu, x_fp32, -1.0)
# The alpha must be no less than 0
self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0)
# the input dtype float16 is supported
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.selu(x_fp16)
......
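Only the data declarations changed in these selu tests; the functional call keeps its contract (scale > 1.0, alpha >= 0, per the error checks above). A minimal dygraph sketch with the default parameters:

import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])
out = F.selu(x)  # defaults: scale ~= 1.0507, alpha ~= 1.6733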
......@@ -42,13 +42,13 @@ def test_static(place,
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
logit = paddle.data(name='logit', shape=logit_np.shape, dtype='float64')
label = paddle.data(name='label', shape=label_np.shape, dtype='float64')
logit = paddle.fluid.data(name='logit', shape=logit_np.shape, dtype='float64')
label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
feed_dict = {"logit": logit_np, "label": label_np}
normalizer = None
if normalizer_np is not None:
normalizer = paddle.data(
normalizer = paddle.fluid.data(
name='normalizer', shape=normalizer_np.shape, dtype='float64')
feed_dict["normalizer"] = normalizer_np
......
......@@ -315,7 +315,7 @@ class TestSoftmaxAPI(unittest.TestCase):
def test_static_check(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, 'float32')
x = paddle.fluid.data('X', self.x_np.shape, 'float32')
out1 = F.softmax(x)
m = paddle.nn.Softmax()
out2 = m(x)
......@@ -354,10 +354,10 @@ class TestSoftmaxAPI(unittest.TestCase):
# The input type must be Variable.
self.assertRaises(TypeError, F.softmax, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[2, 3], dtype='int32')
x_int32 = paddle.fluid.data(name='x_int32', shape=[2, 3], dtype='int32')
self.assertRaises(TypeError, F.softmax, x_int32)
# the input dtype float16 is supported
x_fp16 = paddle.data(name='x_fp16', shape=[2, 3], dtype='float16')
x_fp16 = paddle.fluid.data(name='x_fp16', shape=[2, 3], dtype='float16')
F.softmax(x_fp16)
......
......@@ -44,7 +44,7 @@ class TestStdAPI(unittest.TestCase):
def static(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.shape, self.dtype)
x = paddle.fluid.data('X', self.shape, self.dtype)
out = paddle.std(x, self.axis, self.unbiased, self.keepdim)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x}, fetch_list=[out])
......@@ -111,7 +111,7 @@ class TestStdAPI_alias(unittest.TestCase):
class TestStdError(unittest.TestCase):
def test_error(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [2, 3, 4], 'int32')
x = paddle.fluid.data('X', [2, 3, 4], 'int32')
self.assertRaises(TypeError, paddle.std, x)
......
......@@ -81,7 +81,7 @@ class TestTemporalShift3(TestTemporalShift):
class TestTemporalShiftAPI(unittest.TestCase):
def test_api(self):
input = paddle.randn([6, 4, 2, 2])
out = paddle.nn.functional.temporal_shift(
out = paddle.fluid.layers.temporal_shift(
x=input, seg_num=2, shift_ratio=0.2)
......
......@@ -254,7 +254,7 @@ class TestUniqueAPI(unittest.TestCase):
def test_static_graph(self):
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data(name='x', shape=[3, 2], dtype='float64')
x = paddle.fluid.data(name='x', shape=[3, 2], dtype='float64')
unique, inverse, counts = paddle.unique(
x, return_inverse=True, return_counts=True, axis=0)
place = paddle.CPUPlace()
......@@ -274,13 +274,13 @@ class TestUniqueError(unittest.TestCase):
def test_x_dtype():
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data(name='x', shape=[10, 10], dtype='float16')
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16')
result = paddle.unique(x)
self.assertRaises(TypeError, test_x_dtype)
def test_attr(self):
x = paddle.data(name='x', shape=[10, 10], dtype='float64')
x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
def test_return_index():
result = paddle.unique(x, return_index=0)
......
......@@ -44,7 +44,7 @@ class TestVarAPI(unittest.TestCase):
def static(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.shape, self.dtype)
x = paddle.fluid.data('X', self.shape, self.dtype)
out = paddle.var(x, self.axis, self.unbiased, self.keepdim)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x}, fetch_list=[out])
......@@ -111,7 +111,7 @@ class TestVarAPI_alias(unittest.TestCase):
class TestVarError(unittest.TestCase):
def test_error(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', [2, 3, 4], 'int32')
x = paddle.fluid.data('X', [2, 3, 4], 'int32')
self.assertRaises(TypeError, paddle.var, x)
......
......@@ -25,7 +25,7 @@ from paddle.fluid import core, Program, program_guard
class TestZerosLikeAPIError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
x = paddle.data('x', [3, 4])
x = paddle.fluid.data('x', [3, 4])
self.assertRaises(TypeError, zeros_like, x, 'int8')
......@@ -35,7 +35,7 @@ class TestZerosLikeAPI(unittest.TestCase):
startup_program = Program()
train_program = Program()
with program_guard(train_program, startup_program):
x = paddle.data('X', shape)
x = paddle.fluid.data('X', shape)
# 'bool', 'float32', 'float64', 'int32', 'int64'
out1 = zeros_like(x)
......
......@@ -14,7 +14,7 @@
# TODO: import framework api under this directory
__all__ = [
'create_global_var', 'create_parameter', 'ParamAttr', 'Variable',
'create_parameter', 'ParamAttr',
'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace', 'get_default_dtype',
'set_default_dtype'
]
......@@ -29,10 +29,9 @@ from .random import manual_seed
from .framework import get_default_dtype
from .framework import set_default_dtype
from ..fluid.framework import Variable #DEFINE_ALIAS
from ..fluid.framework import ComplexVariable #DEFINE_ALIAS
from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS
from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS
# from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS
from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS
from ..fluid.core import CPUPlace #DEFINE_ALIAS
from ..fluid.core import CUDAPlace #DEFINE_ALIAS
......
......@@ -37,10 +37,10 @@ from .clip import ClipGradByValue #DEFINE_ALIAS
# from .clip import set_gradient_clip #DEFINE_ALIAS
from .clip import clip #DEFINE_ALIAS
from .clip import clip_by_norm #DEFINE_ALIAS
from .control_flow import cond #DEFINE_ALIAS
# from .control_flow import cond #DEFINE_ALIAS
# from .control_flow import DynamicRNN #DEFINE_ALIAS
# from .control_flow import StaticRNN #DEFINE_ALIAS
from .control_flow import while_loop #DEFINE_ALIAS
# from .control_flow import while_loop #DEFINE_ALIAS
# from .control_flow import rnn #DEFINE_ALIAS
# from .decode import BeamSearchDecoder #DEFINE_ALIAS
# from .decode import Decoder #DEFINE_ALIAS
......@@ -49,7 +49,7 @@ from .control_flow import while_loop #DEFINE_ALIAS
# from .decode import crf_decoding #DEFINE_ALIAS
# from .decode import ctc_greedy_decoder #DEFINE_ALIAS
# from .decode import dynamic_decode #DEFINE_ALIAS
from .decode import gather_tree #DEFINE_ALIAS
# from .decode import gather_tree #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS
from .layer.activation import ELU #DEFINE_ALIAS
from .layer.activation import GELU #DEFINE_ALIAS
......@@ -74,9 +74,6 @@ from .layer.activation import Tanhshrink #DEFINE_ALIAS
from .layer.activation import ThresholdedReLU #DEFINE_ALIAS
from .layer.activation import LogSoftmax #DEFINE_ALIAS
from .layer.activation import Maxout #DEFINE_ALIAS
from .layer.common import BilinearTensorProduct #DEFINE_ALIAS
from .layer.common import Pool2D #DEFINE_ALIAS
from .layer.common import Pad2D #DEFINE_ALIAS
from .layer.common import ReflectionPad1d #DEFINE_ALIAS
from .layer.common import ReplicationPad1d #DEFINE_ALIAS
from .layer.common import ConstantPad1d #DEFINE_ALIAS
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the control flow api
from ..fluid.layers import cond #DEFINE_ALIAS
from ..fluid.layers import while_loop #DEFINE_ALIAS
__all__ = [
'cond',
# 'DynamicRNN',
# 'StaticRNN',
'while_loop',
# 'rnn'
]
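Per the commit message, cond and while_loop are now reached as paddle.static.nn.cond / paddle.static.nn.while_loop instead of through paddle.nn. A sketch of the relocated entry point (the toy branch functions are illustrative only):

import paddle

paddle.enable_static()

def true_fn():
    return paddle.fluid.layers.fill_constant([1], "int32", 1)

def false_fn():
    return paddle.fluid.layers.fill_constant([1], "int32", 0)

# pred is a one-element boolean tensor selecting the branch
pred = paddle.fluid.layers.fill_constant([1], "bool", True)
out = paddle.static.nn.cond(pred, true_fn, false_fn)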
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define api to implement decoding algorithm
# from ..fluid.layers import beam_search #DEFINE_ALIAS
# from ..fluid.layers import beam_search_decode #DEFINE_ALIAS
from ..fluid.layers import gather_tree #DEFINE_ALIAS
__all__ = [
# 'BeamSearchDecoder',
# 'Decoder',
# 'beam_search',
# 'beam_search_decode',
# 'crf_decoding',
# 'ctc_greedy_decoder',
# 'dynamic_decode',
'gather_tree'
]
......@@ -30,7 +30,7 @@ __all__ += pooling.__all__
from . import loss
__all__ += loss.__all__
from .activation import elu #DEFINE_ALIAS
from .activation import erf #DEFINE_ALIAS
# from .activation import erf #DEFINE_ALIAS
from .activation import gelu #DEFINE_ALIAS
from .activation import hardshrink #DEFINE_ALIAS
from .activation import hardtanh #DEFINE_ALIAS
......@@ -44,7 +44,7 @@ from .activation import relu #DEFINE_ALIAS
from .activation import relu6 #DEFINE_ALIAS
from .activation import selu #DEFINE_ALIAS
from .activation import sigmoid #DEFINE_ALIAS
from .activation import soft_relu #DEFINE_ALIAS
# from .activation import soft_relu #DEFINE_ALIAS
from .activation import softmax #DEFINE_ALIAS
from .activation import softplus #DEFINE_ALIAS
from .activation import softshrink #DEFINE_ALIAS
......@@ -61,10 +61,10 @@ from .common import alpha_dropout #DEFINE_ALIAS
# from .common import embedding #DEFINE_ALIAS
# from .common import fc #DEFINE_ALIAS
from .common import label_smooth
from .common import one_hot #DEFINE_ALIAS
# from .common import one_hot #DEFINE_ALIAS
from .common import pad #DEFINE_ALIAS
from .common import pad_constant_like #DEFINE_ALIAS
from .common import pad2d #DEFINE_ALIAS
# from .common import pad_constant_like #DEFINE_ALIAS
# from .common import pad2d #DEFINE_ALIAS
from .common import cosine_similarity #DEFINE_ALIAS
from .common import unfold #DEFINE_ALIAS
# from .common import bilinear_tensor_product #DEFINE_ALIAS
......@@ -79,21 +79,21 @@ from .conv import conv2d #DEFINE_ALIAS
from .conv import conv_transpose2d #DEFINE_ALIAS
from .conv import conv3d #DEFINE_ALIAS
from .conv import conv_transpose3d #DEFINE_ALIAS
from .extension import add_position_encoding #DEFINE_ALIAS
# from .extension import add_position_encoding #DEFINE_ALIAS
# from .extension import autoincreased_step_counter #DEFINE_ALIAS
from .extension import continuous_value_model #DEFINE_ALIAS
from .extension import filter_by_instag #DEFINE_ALIAS
# from .extension import continuous_value_model #DEFINE_ALIAS
# from .extension import filter_by_instag #DEFINE_ALIAS
# from .extension import linear_chain_crf #DEFINE_ALIAS
# from .extension import merge_selected_rows #DEFINE_ALIAS
from .extension import multiclass_nms #DEFINE_ALIAS
from .extension import polygon_box_transform #DEFINE_ALIAS
from .extension import random_crop #DEFINE_ALIAS
from .extension import row_conv #DEFINE_ALIAS
from .extension import rpn_target_assign #DEFINE_ALIAS
from .extension import similarity_focus #DEFINE_ALIAS
from .extension import target_assign #DEFINE_ALIAS
from .extension import temporal_shift #DEFINE_ALIAS
from .extension import warpctc #DEFINE_ALIAS
# from .extension import multiclass_nms #DEFINE_ALIAS
# from .extension import polygon_box_transform #DEFINE_ALIAS
# from .extension import random_crop #DEFINE_ALIAS
# from .extension import row_conv #DEFINE_ALIAS
# from .extension import rpn_target_assign #DEFINE_ALIAS
# from .extension import similarity_focus #DEFINE_ALIAS
# from .extension import target_assign #DEFINE_ALIAS
# from .extension import temporal_shift #DEFINE_ALIAS
# from .extension import warpctc #DEFINE_ALIAS
from .extension import diag_embed #DEFINE_ALIAS
# from .lod import sequence_concat #DEFINE_ALIAS
# from .lod import sequence_conv #DEFINE_ALIAS
......@@ -115,7 +115,7 @@ from .extension import diag_embed #DEFINE_ALIAS
# from .lod import array_read #DEFINE_ALIAS
# from .lod import array_write #DEFINE_ALIAS
# from .lod import create_array #DEFINE_ALIAS
from .lod import hash #DEFINE_ALIAS
# from .lod import hash #DEFINE_ALIAS
# from .lod import im2sequence #DEFINE_ALIAS
# from .lod import lod_append #DEFINE_ALIAS
# from .lod import lod_reset #DEFINE_ALIAS
......@@ -126,11 +126,10 @@ from .lod import hash #DEFINE_ALIAS
# from .lod import dynamic_lstmp #DEFINE_ALIAS
from .loss import binary_cross_entropy #DEFINE_ALIAS
from .loss import binary_cross_entropy_with_logits #DEFINE_ALIAS
from .loss import bpr_loss #DEFINE_ALIAS
from .loss import center_loss #DEFINE_ALIAS
# from .loss import bpr_loss #DEFINE_ALIAS
# from .loss import center_loss #DEFINE_ALIAS
from .loss import cross_entropy #DEFINE_ALIAS
from .loss import dice_loss #DEFINE_ALIAS
from .loss import edit_distance #DEFINE_ALIAS
from .loss import hsigmoid_loss #DEFINE_ALIAS
from .loss import iou_similarity #DEFINE_ALIAS
from .loss import kl_div #DEFINE_ALIAS
......@@ -141,15 +140,13 @@ from .loss import mse_loss #DEFINE_ALIAS
from .loss import nll_loss #DEFINE_ALIAS
# from .loss import nce #DEFINE_ALIAS
from .loss import npair_loss #DEFINE_ALIAS
from .loss import rank_loss #DEFINE_ALIAS
from .loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
from .loss import sigmoid_focal_loss #DEFINE_ALIAS
from .loss import smooth_l1 #DEFINE_ALIAS
# from .loss import smooth_l1 #DEFINE_ALIAS
from .loss import smooth_l1_loss #DEFINE_ALIAS
from .loss import softmax_with_cross_entropy #DEFINE_ALIAS
from .loss import square_error_cost #DEFINE_ALIAS
from .loss import ssd_loss #DEFINE_ALIAS
from .loss import teacher_student_sigmoid_loss #DEFINE_ALIAS
# from .loss import teacher_student_sigmoid_loss #DEFINE_ALIAS
from .loss import ctc_loss #DEFINE_ALIAS
# from .norm import data_norm #DEFINE_ALIAS
# from .norm import group_norm #DEFINE_ALIAS
......@@ -159,8 +156,8 @@ from .norm import layer_norm #DEFINE_ALIAS
from .norm import local_response_norm #DEFINE_ALIAS
from .norm import normalize #DEFINE_ALIAS
# from .norm import spectral_norm #DEFINE_ALIAS
from .pooling import pool2d #DEFINE_ALIAS
from .pooling import pool3d #DEFINE_ALIAS
# from .pooling import pool2d #DEFINE_ALIAS
# from .pooling import pool3d #DEFINE_ALIAS
from .pooling import avg_pool1d #DEFINE_ALIAS
from .pooling import avg_pool2d #DEFINE_ALIAS
from .pooling import avg_pool3d #DEFINE_ALIAS
......@@ -175,43 +172,47 @@ from .pooling import adaptive_avg_pool1d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool2d #DEFINE_ALIAS
from .pooling import adaptive_avg_pool3d #DEFINE_ALIAS
from .rnn import rnn #DEFINE_ALIAS
from .rnn import birnn #DEFINE_ALIAS
# from .rnn import rnn #DEFINE_ALIAS
# from .rnn import birnn #DEFINE_ALIAS
# from .rnn import gru_unit #DEFINE_ALIAS
# from .rnn import lstm #DEFINE_ALIAS
# from .rnn import lstm_unit #DEFINE_ALIAS
from .vision import affine_channel #DEFINE_ALIAS
# from .vision import affine_channel #DEFINE_ALIAS
from .vision import affine_grid #DEFINE_ALIAS
from .vision import anchor_generator #DEFINE_ALIAS
from .vision import bipartite_match #DEFINE_ALIAS
from .vision import box_clip #DEFINE_ALIAS
from .vision import box_coder #DEFINE_ALIAS
from .vision import box_decoder_and_assign #DEFINE_ALIAS
from .vision import collect_fpn_proposals #DEFINE_ALIAS
# from .vision import anchor_generator #DEFINE_ALIAS
# from .vision import bipartite_match #DEFINE_ALIAS
# from .vision import box_clip #DEFINE_ALIAS
# from .vision import box_coder #DEFINE_ALIAS
# from .vision import box_decoder_and_assign #DEFINE_ALIAS
# from .vision import collect_fpn_proposals #DEFINE_ALIAS
# from .vision import deformable_conv #DEFINE_ALIAS
from .vision import deformable_roi_pooling #DEFINE_ALIAS
from .vision import density_prior_box #DEFINE_ALIAS
from .vision import detection_output #DEFINE_ALIAS
from .vision import distribute_fpn_proposals #DEFINE_ALIAS
from .vision import fsp_matrix #DEFINE_ALIAS
from .vision import generate_mask_labels #DEFINE_ALIAS
from .vision import generate_proposal_labels #DEFINE_ALIAS
from .vision import generate_proposals #DEFINE_ALIAS
# from .vision import deformable_roi_pooling #DEFINE_ALIAS
# from .vision import density_prior_box #DEFINE_ALIAS
# from .vision import detection_output #DEFINE_ALIAS
# from .vision import distribute_fpn_proposals #DEFINE_ALIAS
# from .vision import fsp_matrix #DEFINE_ALIAS
# from .vision import generate_mask_labels #DEFINE_ALIAS
# from .vision import generate_proposal_labels #DEFINE_ALIAS
# from .vision import generate_proposals #DEFINE_ALIAS
from .vision import grid_sample #DEFINE_ALIAS
from .vision import image_resize_short #DEFINE_ALIAS
# from .vision import image_resize #DEFINE_ALIAS
# from .vision import image_resize_short #DEFINE_ALIAS
# from .vision import multi_box_head #DEFINE_ALIAS
from .vision import pixel_shuffle #DEFINE_ALIAS
from .vision import prior_box #DEFINE_ALIAS
from .vision import prroi_pool #DEFINE_ALIAS
from .vision import psroi_pool #DEFINE_ALIAS
from .vision import retinanet_detection_output #DEFINE_ALIAS
from .vision import retinanet_target_assign #DEFINE_ALIAS
from .vision import roi_align #DEFINE_ALIAS
from .vision import roi_perspective_transform #DEFINE_ALIAS
from .vision import roi_pool #DEFINE_ALIAS
from .vision import shuffle_channel #DEFINE_ALIAS
from .vision import space_to_depth #DEFINE_ALIAS
from .vision import yolo_box #DEFINE_ALIAS
from .vision import yolov3_loss #DEFINE_ALIAS
# from .vision import prior_box #DEFINE_ALIAS
# from .vision import prroi_pool #DEFINE_ALIAS
# from .vision import psroi_pool #DEFINE_ALIAS
# from .vision import resize_bilinear #DEFINE_ALIAS
# from .vision import resize_nearest #DEFINE_ALIAS
# from .vision import resize_trilinear #DEFINE_ALIAS
# from .vision import retinanet_detection_output #DEFINE_ALIAS
# from .vision import retinanet_target_assign #DEFINE_ALIAS
# from .vision import roi_align #DEFINE_ALIAS
# from .vision import roi_perspective_transform #DEFINE_ALIAS
# from .vision import roi_pool #DEFINE_ALIAS
# from .vision import shuffle_channel #DEFINE_ALIAS
# from .vision import space_to_depth #DEFINE_ALIAS
# from .vision import yolo_box #DEFINE_ALIAS
# from .vision import yolov3_loss #DEFINE_ALIAS
from .input import one_hot #DEFINE_ALIAS
from .input import embedding #DEFINE_ALIAS
......@@ -13,14 +13,18 @@
# limitations under the License.
# TODO: define activation functions of neural network
from ...fluid.layers import erf #DEFINE_ALIAS
from ...fluid.layers import soft_relu #DEFINE_ALIAS
from ...fluid.layers import brelu #DEFINE_ALIAS
# from ...fluid.layers import erf #DEFINE_ALIAS
from ...fluid.layers import hard_sigmoid #DEFINE_ALIAS
from ...fluid.layers import hard_swish #DEFINE_ALIAS
from ...fluid.layers import maxout #DEFINE_ALIAS
# from ...fluid.layers import soft_relu #DEFINE_ALIAS
from ...fluid.layers import swish #DEFINE_ALIAS
from ...fluid.layers import sigmoid #DEFINE_ALIAS
from ...tensor.math import tanh #DEFINE_ALIAS
__all__ = [
'elu',
'erf',
'gelu',
'hardshrink',
'hardtanh',
......@@ -33,7 +37,6 @@ __all__ = [
'relu',
'relu6',
'selu',
'soft_relu',
'softmax',
'softplus',
'softshrink',
......
......@@ -20,13 +20,12 @@ from paddle.fluid.layers.tensor import Variable, fill_constant, zeros, concat
from ...fluid.layers import core
from ...fluid import dygraph_utils
# TODO: define the common functions to build a neural network
from ...fluid import one_hot #DEFINE_ALIAS
from ...fluid.layers import pad2d #DEFINE_ALIAS
# from ...fluid import one_hot #DEFINE_ALIAS
# from ...fluid.layers import pad2d #DEFINE_ALIAS
from ...fluid.layers import unfold #DEFINE_ALIAS
from ...fluid.layers import assign #DEFINE_ALIAS
from ...fluid.layers import squeeze #DEFINE_ALIAS
from ...fluid.layers import unsqueeze #DEFINE_ALIAS
from ...fluid.layers import elementwise_mul #DEFINE_ALIAS
from ...tensor import clip
from ...tensor import sum
from ...tensor import sqrt
......@@ -36,7 +35,7 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from ...fluid.framework import Variable, in_dygraph_mode, _varbase_creator
#from ...fluid.layers import fc #DEFINE_ALIAS
from ...fluid.layers import pad_constant_like #DEFINE_ALIAS
# from ...fluid.layers import pad_constant_like #DEFINE_ALIAS
from ...fluid.framework import in_dygraph_mode
from ...fluid import core, dygraph_utils
from ...fluid import core, layers
......@@ -51,10 +50,7 @@ __all__ = [
# 'fc',
'label_smooth',
'linear',
'one_hot',
'pad',
'pad_constant_like',
'pad2d',
'unfold',
# 'bilinear_tensor_product',
'assign',
......@@ -1395,9 +1391,9 @@ def cosine_similarity(x1, x2, axis=1, eps=1e-8):
# [0.99806249 0.9817672 0.94987036]
"""
w12 = sum(elementwise_mul(x1, x2), axis=axis)
w1 = sum(elementwise_mul(x1, x1), axis=axis)
w2 = sum(elementwise_mul(x2, x2), axis=axis)
w12 = sum(paddle.multiply(x1, x2), axis=axis)
w1 = sum(paddle.multiply(x1, x1), axis=axis)
w2 = sum(paddle.multiply(x2, x2), axis=axis)
n12 = sqrt(clip(w1 * w2, min=eps * eps))
cos_sim = w12 / n12
return cos_sim
......
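After this rewrite, cosine_similarity is expressed entirely through paddle.multiply: w12 = sum(x1*x2), w1 = sum(x1*x1), w2 = sum(x2*x2), and cos = w12 / sqrt(clip(w1*w2, min=eps*eps)) along the chosen axis. A usage sketch:

import paddle
import paddle.nn.functional as F

paddle.disable_static()
x1 = paddle.rand([2, 3])
x2 = paddle.rand([2, 3])
sim = F.cosine_similarity(x1, x2, axis=1)  # one similarity value per row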
......@@ -13,36 +13,10 @@
# limitations under the License.
# TODO: define the extension functions
from ...fluid.layers import add_position_encoding #DEFINE_ALIAS
from ...fluid.layers import multiclass_nms #DEFINE_ALIAS
from ...fluid.layers import target_assign #DEFINE_ALIAS
from ...fluid.layers import temporal_shift #DEFINE_ALIAS
from ...fluid.layers import continuous_value_model #DEFINE_ALIAS
from ...fluid.layers import filter_by_instag #DEFINE_ALIAS
from ...fluid.layers import polygon_box_transform #DEFINE_ALIAS
from ...fluid.layers import random_crop #DEFINE_ALIAS
from ...fluid.layers import rpn_target_assign #DEFINE_ALIAS
from ...fluid.layers import similarity_focus #DEFINE_ALIAS
from ...fluid.layers import warpctc #DEFINE_ALIAS
__all__ = [
'add_position_encoding',
# 'autoincreased_step_counter',
'continuous_value_model',
'filter_by_instag',
# 'linear_chain_crf',
# 'merge_selected_rows',
'multiclass_nms',
'polygon_box_transform',
'random_crop',
'row_conv',
'rpn_target_assign',
'similarity_focus',
'target_assign',
'temporal_shift',
'warpctc',
'diag_embed'
'diag_embed',
'row_conv'
]
import numpy as np
......@@ -176,8 +150,6 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
@templatedoc()
def row_conv(input, weight, act=None):
"""
:alias_main: paddle.nn.functional.row_conv
:alias: paddle.nn.functional.row_conv,paddle.nn.functional.extension.row_conv
${comment}
......@@ -217,7 +189,7 @@ def row_conv(input, weight, act=None):
with dg.guard(place):
x_var = dg.to_variable(x)
w_var = dg.to_variable(weight)
y_var = F.row_conv(x_var, w_var)
y_var = F.extension.row_conv(x_var, w_var)
y_np = y_var.numpy()
print(y_np.shape)
......
......@@ -74,7 +74,7 @@ def one_hot(x, num_classes, name=None):
import paddle
# Corresponds to the first example above, where label.shape is 4 and one_hot_label.shape is [4, 4].
label = paddle.data(name="label", shape=[4, 1], dtype="int64")
label = paddle.fluid.data(name="label", shape=[4, 1], dtype="int64")
# label.shape = [4]
# label.data = [1, 1, 3, 0]
one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=4)
......@@ -183,7 +183,7 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):
weight = prog.global_block().create_parameter(
(128, 100), dtype="float32", default_initializer=Constant(1.0))
label = paddle.data(
label = paddle.fluid.data(
name="label",
shape=[4],
append_batch_size=False,
......
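The docstring edits above only swap the data declaration; the one_hot call itself is unchanged. A compact dygraph sketch:

import paddle

paddle.disable_static()
label = paddle.to_tensor([1, 1, 3, 0], dtype="int64")
one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
# one_hot_label.shape == [4, 4]; row i is zero except at index label[i]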
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define functions which accept only LoDTensor as input
from ...fluid.layers import hash #DEFINE_ALIAS
__all__ = [
# 'sequence_concat',
# 'sequence_conv',
# 'sequence_enumerate',
# 'sequence_expand_as',
# 'sequence_expand',
# 'sequence_first_step',
# 'sequence_last_step',
# 'sequence_mask',
# 'sequence_pad',
# 'sequence_pool',
# 'sequence_reshape',
# 'sequence_reverse',
# 'sequence_scatter',
# 'sequence_slice',
# 'sequence_softmax',
# 'sequence_unpad',
# 'array_length',
# 'array_read',
# 'array_write',
# 'create_array',
'hash',
# 'im2sequence',
# 'lod_append',
# 'lod_reset',
# 'reorder_lod_tensor_by_rank',
# 'tensor_array_to_tensor',
# 'dynamic_gru',
# 'dynamic_lstm',
# 'dynamic_lstmp'
]
......@@ -23,19 +23,14 @@ import paddle
import paddle.fluid as fluid
from ...fluid.framework import core, in_dygraph_mode
from ...fluid.layers.nn import _elementwise_op_in_dygraph
from ...fluid.layers import bpr_loss #DEFINE_ALIAS
from ...fluid.layers import center_loss #DEFINE_ALIAS
from ...fluid.layers import dice_loss #DEFINE_ALIAS
from ...fluid.layers import iou_similarity #DEFINE_ALIAS
from ...fluid.layers import log_loss #DEFINE_ALIAS
from ...fluid.layers import npair_loss #DEFINE_ALIAS
from ...fluid.layers import rank_loss #DEFINE_ALIAS
from ...fluid.layers import reshape
from ...fluid.layers import smooth_l1 #DEFINE_ALIAS
from ...fluid.layers import softmax_with_cross_entropy #DEFINE_ALIAS
from ...fluid.layers import square_error_cost #DEFINE_ALIAS
from ...fluid.layers import ssd_loss #DEFINE_ALIAS
from ...fluid.layers import teacher_student_sigmoid_loss #DEFINE_ALIAS
from ...fluid.layers import edit_distance #DEFINE_ALIAS
from ...fluid.layers import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
......@@ -48,11 +43,8 @@ from ...fluid.framework import Variable
__all__ = [
'binary_cross_entropy',
'binary_cross_entropy_with_logits',
'bpr_loss',
'center_loss',
'cross_entropy',
'dice_loss',
'edit_distance',
'hsigmoid_loss',
'iou_similarity',
'kl_div',
......@@ -63,15 +55,11 @@ __all__ = [
# 'nce',
'nll_loss',
'npair_loss',
'rank_loss',
'sampled_softmax_with_cross_entropy',
'sigmoid_focal_loss',
'smooth_l1',
'smooth_l1_loss',
'softmax_with_cross_entropy',
'square_error_cost',
'ssd_loss',
'teacher_student_sigmoid_loss',
'ctc_loss',
]
......@@ -179,7 +167,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
outputs={'Out': [out]})
if weight is not None:
if isinstance(weight, paddle.framework.Variable):
if isinstance(weight, paddle.static.Variable):
weight_name = name if reduction is 'none' else None
out = paddle.multiply(out, weight, axis=-1, name=weight_name)
else:
......@@ -317,13 +305,13 @@ def binary_cross_entropy_with_logits(logit,
out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(
logit, label, name=sigmoid_name)
one = paddle.fill_constant(shape=[1], value=1.0, dtype=logit.dtype)
one = paddle.fluid.layers.fill_constant(shape=[1], value=1.0, dtype=logit.dtype)
if pos_weight is not None:
fluid.data_feeder.check_variable_and_dtype(
pos_weight, 'pos_weight', ['float32', 'float64'],
'binary_cross_entropy_with_logits')
log_weight = paddle.add(
paddle.multiply(label, paddle.elementwise_sub(pos_weight, one)),
paddle.multiply(label, paddle.fluid.layers.elementwise_sub(pos_weight, one)),
one)
pos_weight_name = name if reduction == 'none' and weight is None else None
out = paddle.multiply(out, log_weight, name=pos_weight_name)
......@@ -625,12 +613,12 @@ def margin_ranking_loss(input,
fluid.data_feeder.check_variable_and_dtype(
label, 'label', ['float32', 'float64'], 'margin_rank_loss')
out = paddle.elementwise_sub(other, input)
out = paddle.fluid.layers.elementwise_sub(other, input)
out = paddle.multiply(out, label)
if margin != 0.0:
margin_var = out.block.create_var(dtype=out.dtype)
paddle.fill_constant([1], out.dtype, margin, out=margin_var)
paddle.fluid.layers.fill_constant([1], out.dtype, margin, out=margin_var)
out = paddle.add(out, margin_var)
result_out = helper.create_variable_for_type_inference(input.dtype)
......@@ -735,13 +723,13 @@ def l1_loss(input, label, reduction='mean', name=None):
label, 'label', ['float32', 'float64', 'int32', 'int64'], 'l1_loss')
if reduction == 'sum':
unreduced = paddle.elementwise_sub(input, label, act='abs')
unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs')
return paddle.sum(unreduced, name=name)
elif reduction == 'mean':
unreduced = paddle.elementwise_sub(input, label, act='abs')
unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs')
return paddle.mean(unreduced, name=name)
else:
return paddle.elementwise_sub(input, label, act='abs', name=name)
return paddle.fluid.layers.elementwise_sub(input, label, act='abs', name=name)
def nll_loss(input,
......@@ -1008,8 +996,8 @@ def mse_loss(input, label, reduction='mean', name=None):
# static graph mode
paddle.enable_static()
mse_loss = paddle.nn.loss.MSELoss()
input = paddle.data(name="input", shape=[1])
label = paddle.data(name="label", shape=[1])
input = paddle.fluid.data(name="input", shape=[1])
label = paddle.fluid.data(name="label", shape=[1])
place = paddle.CPUPlace()
output = mse_loss(input,label)
......@@ -1354,7 +1342,7 @@ def sigmoid_focal_loss(logit,
label = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype='float32')
one = paddle.to_tensor([1.], dtype='float32')
fg_label = paddle.greater_equal(label, one)
fg_num = paddle.reduce_sum(paddle.cast(fg_label, dtype='float32'))
fg_num = paddle.fluid.layers.reduce_sum(paddle.cast(fg_label, dtype='float32'))
output = paddle.nn.functional.sigmoid_focal_loss(logit, label, normalizer=fg_num)
print(output.numpy()) # [0.65782464]
......
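The loss rewrites swap the elementwise_sub / fill_constant aliases for their fluid.layers paths without changing semantics; l1_loss, for instance, still computes |input - label| and then reduces. A sketch:

import paddle
import paddle.nn.functional as F

paddle.disable_static()
input = paddle.to_tensor([1.5, 0.5], dtype="float32")
label = paddle.to_tensor([1.0, 1.0], dtype="float32")
loss = F.l1_loss(input, label, reduction="mean")  # (0.5 + 0.5) / 2 = 0.5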
......@@ -109,8 +109,8 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
helper.append_op(
type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
eps = out.block.create_var(dtype=out.dtype)
paddle.fill_constant([1], out.dtype, epsilon, out=eps)
return paddle.elementwise_div(x, paddle.maximum(out, eps), name=name)
paddle.fluid.layers.fill_constant([1], out.dtype, epsilon, out=eps)
return paddle.fluid.layers.elementwise_div(x, paddle.maximum(out, eps), name=name)
def batch_norm(x,
......
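normalize keeps the same contract (divide x by max(||x||_p, epsilon) along the given axis); only the epsilon constant is now built via paddle.fluid.layers.fill_constant. Usage sketch:

import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.to_tensor([[3.0, 4.0]])
y = F.normalize(x, p=2, axis=1)  # -> [[0.6, 0.8]]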
......@@ -13,16 +13,12 @@
# limitations under the License.
# TODO: define pooling functions
from ...fluid.layers import pool2d #DEFINE_ALIAS
from ...fluid.layers import pool3d #DEFINE_ALIAS
from ...fluid import core
from ...fluid.framework import in_dygraph_mode
from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype
__all__ = [
'pool2d',
'pool3d',
'avg_pool1d',
'avg_pool2d',
'avg_pool3d',
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.layers.rnn import rnn, birnn
__all__ = ['rnn', 'birnn']
......@@ -20,71 +20,44 @@ from ...fluid import dygraph_utils
import numpy as np
# TODO: define special functions used in computer vision tasks
from ...fluid.layers import affine_channel #DEFINE_ALIAS
from ...fluid.layers import anchor_generator #DEFINE_ALIAS
from ...fluid.layers import bipartite_match #DEFINE_ALIAS
from ...fluid.layers import box_clip #DEFINE_ALIAS
from ...fluid.layers import box_coder #DEFINE_ALIAS
from ...fluid.layers import box_decoder_and_assign #DEFINE_ALIAS
from ...fluid.layers import collect_fpn_proposals #DEFINE_ALIAS
from ...fluid.layers import deformable_roi_pooling #DEFINE_ALIAS
from ...fluid.layers import density_prior_box #DEFINE_ALIAS
from ...fluid.layers import detection_output #DEFINE_ALIAS
from ...fluid.layers import distribute_fpn_proposals #DEFINE_ALIAS
from ...fluid.layers import generate_mask_labels #DEFINE_ALIAS
from ...fluid.layers import generate_proposal_labels #DEFINE_ALIAS
from ...fluid.layers import generate_proposals #DEFINE_ALIAS
from ...fluid.layers import prior_box #DEFINE_ALIAS
from ...fluid.layers import prroi_pool #DEFINE_ALIAS
from ...fluid.layers import psroi_pool #DEFINE_ALIAS
from ...fluid.layers import roi_align #DEFINE_ALIAS
from ...fluid.layers import roi_pool #DEFINE_ALIAS
from ...fluid.layers import space_to_depth #DEFINE_ALIAS
from ...fluid.layers import yolo_box #DEFINE_ALIAS
from ...fluid.layers import yolov3_loss #DEFINE_ALIAS
from ...fluid.layers import fsp_matrix #DEFINE_ALIAS
from ...fluid.layers import image_resize_short #DEFINE_ALIAS
# from ...fluid.layers import affine_channel #DEFINE_ALIAS
# from ...fluid.layers import anchor_generator #DEFINE_ALIAS
# from ...fluid.layers import bipartite_match #DEFINE_ALIAS
# from ...fluid.layers import box_clip #DEFINE_ALIAS
# from ...fluid.layers import box_coder #DEFINE_ALIAS
# from ...fluid.layers import box_decoder_and_assign #DEFINE_ALIAS
# from ...fluid.layers import collect_fpn_proposals #DEFINE_ALIAS
# from ...fluid.layers import deformable_roi_pooling #DEFINE_ALIAS
# from ...fluid.layers import density_prior_box #DEFINE_ALIAS
# from ...fluid.layers import detection_output #DEFINE_ALIAS
# from ...fluid.layers import distribute_fpn_proposals #DEFINE_ALIAS
# from ...fluid.layers import generate_mask_labels #DEFINE_ALIAS
# from ...fluid.layers import generate_proposal_labels #DEFINE_ALIAS
# from ...fluid.layers import generate_proposals #DEFINE_ALIAS
# from ...fluid.layers import image_resize #DEFINE_ALIAS
# from ...fluid.layers import prior_box #DEFINE_ALIAS
# from ...fluid.layers import prroi_pool #DEFINE_ALIAS
# from ...fluid.layers import psroi_pool #DEFINE_ALIAS
# from ...fluid.layers import resize_bilinear #DEFINE_ALIAS
# from ...fluid.layers import resize_nearest #DEFINE_ALIAS
# from ...fluid.layers import resize_trilinear #DEFINE_ALIAS
# from ...fluid.layers import roi_align #DEFINE_ALIAS
# from ...fluid.layers import roi_pool #DEFINE_ALIAS
# from ...fluid.layers import space_to_depth #DEFINE_ALIAS
# from ...fluid.layers import yolo_box #DEFINE_ALIAS
# from ...fluid.layers import yolov3_loss #DEFINE_ALIAS
# from ...fluid.layers import fsp_matrix #DEFINE_ALIAS
# from ...fluid.layers import image_resize_short #DEFINE_ALIAS
# from ...fluid.layers import pixel_shuffle #DEFINE_ALIAS
from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS
from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS
from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS
from ...fluid.layers import shuffle_channel #DEFINE_ALIAS
# from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS
# from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS
# from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS
# from ...fluid.layers import shuffle_channel #DEFINE_ALIAS
__all__ = [
'affine_channel',
'affine_grid',
'anchor_generator',
'bipartite_match',
'box_clip',
'box_coder',
'box_decoder_and_assign',
'collect_fpn_proposals',
# 'deformable_conv',
'deformable_roi_pooling',
'density_prior_box',
'detection_output',
'distribute_fpn_proposals',
'fsp_matrix',
'generate_mask_labels',
'generate_proposal_labels',
'generate_proposals',
'grid_sample',
'image_resize_short',
# 'multi_box_head',
'pixel_shuffle',
'prior_box',
'prroi_pool',
'psroi_pool',
'retinanet_detection_output',
'retinanet_target_assign',
'roi_align',
'roi_perspective_transform',
'roi_pool',
'shuffle_channel',
'space_to_depth',
'yolo_box',
'yolov3_loss'
'pixel_shuffle'
]
......
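After pruning, the vision __all__ keeps only the migrated ops, of which pixel_shuffle rearranges a [N, C*r^2, H, W] tensor into [N, C, H*r, W*r]. Sketch:

import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.randn([2, 9, 4, 4])
out = F.pixel_shuffle(x, 3)  # -> shape [2, 1, 12, 12]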
(16 additional file diffs were collapsed by the viewer and are not shown.)