Unverified · Commit 05335a28 authored by Leo Chen, committed by GitHub

[release-2.0] Move some APIs from paddle to fluid (#24188)

* update paddle/tensor, test=develop

* update linalg.py, test=develop

* update nn/functional, test=develop

* delete paddle/tensor/*, test=develop

* merge upstream, test=develop

* update __init__, test=develop

* pass ci, test=develop
Parent 9220005c
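The change below is mechanical at each call site: an op that used to be exposed as paddle.<name> is now called through fluid.layers.<name>. A minimal before/after sketch, assuming a build of this release-2.0 branch; the name and signature are taken from the updated tests in this diff:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    inp = fluid.dygraph.to_variable(np.ones((3, 100), dtype='float32'))
    t1 = fluid.dygraph.to_variable(np.ones((3, 100), dtype='float32'))
    t2 = fluid.dygraph.to_variable(np.ones((3, 100), dtype='float32'))
    # before this PR: out = paddle.addcmul(inp, t1, t2, value=1.0)
    out = fluid.layers.addcmul(inp, t1, t2, value=1.0)
    print(out.numpy())  # inp + 1.0 * t1 * t2, i.e. all twos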
......@@ -34,172 +34,11 @@ import paddle.compat
import paddle.distributed
batch = batch.batch
import paddle.sysconfig
import paddle.tensor
import paddle.nn
import paddle.framework
import paddle.imperative
import paddle.complex
# TODO: define alias in tensor and framework directory
# from .tensor.creation import create_tensor #DEFINE_ALIAS
# from .tensor.creation import create_lod_tensor #DEFINE_ALIAS
# from .tensor.creation import create_random_int_lodtensor #DEFINE_ALIAS
# from .tensor.creation import crop_tensor #DEFINE_ALIAS
# from .tensor.creation import diag #DEFINE_ALIAS
from .tensor.creation import eye #DEFINE_ALIAS
from .tensor.creation import fill_constant #DEFINE_ALIAS
# from .tensor.creation import get_tensor_from_selected_rows #DEFINE_ALIAS
from .tensor.creation import linspace #DEFINE_ALIAS
from .tensor.creation import ones #DEFINE_ALIAS
from .tensor.creation import ones_like #DEFINE_ALIAS
# from .tensor.creation import range #DEFINE_ALIAS
from .tensor.creation import zeros #DEFINE_ALIAS
from .tensor.creation import zeros_like #DEFINE_ALIAS
from .tensor.creation import arange #DEFINE_ALIAS
# from .tensor.creation import eye #DEFINE_ALIAS
from .tensor.creation import full #DEFINE_ALIAS
# from .tensor.creation import linspace #DEFINE_ALIAS
# from .tensor.creation import full_like #DEFINE_ALIAS
# from .tensor.creation import triu #DEFINE_ALIAS
# from .tensor.creation import tril #DEFINE_ALIAS
from .tensor.creation import meshgrid #DEFINE_ALIAS
# from .tensor.stat import mean #DEFINE_ALIAS
# from .tensor.stat import reduce_mean #DEFINE_ALIAS
# from .tensor.stat import std #DEFINE_ALIAS
from .tensor.stat import var #DEFINE_ALIAS
from .tensor.logic import equal #DEFINE_ALIAS
# from .tensor.logic import greater_equal #DEFINE_ALIAS
# from .tensor.logic import greater_than #DEFINE_ALIAS
# from .tensor.logic import is_empty #DEFINE_ALIAS
# from .tensor.logic import isfinite #DEFINE_ALIAS
# from .tensor.logic import less_equal #DEFINE_ALIAS
# from .tensor.logic import less_than #DEFINE_ALIAS
# from .tensor.logic import logical_and #DEFINE_ALIAS
# from .tensor.logic import logical_not #DEFINE_ALIAS
# from .tensor.logic import logical_or #DEFINE_ALIAS
# from .tensor.logic import logical_xor #DEFINE_ALIAS
# from .tensor.logic import not_equal #DEFINE_ALIAS
# from .tensor.logic import reduce_all #DEFINE_ALIAS
# from .tensor.logic import reduce_any #DEFINE_ALIAS
from .tensor.logic import allclose #DEFINE_ALIAS
from .tensor.logic import elementwise_equal #DEFINE_ALIAS
# from .tensor.logic import isnan #DEFINE_ALIAS
# from .tensor.tensor import Tensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensorArray #DEFINE_ALIAS
# from .tensor.random import gaussin #DEFINE_ALIAS
# from .tensor.random import uniform #DEFINE_ALIAS
# from .tensor.random import shuffle #DEFINE_ALIAS
from .tensor.random import randn #DEFINE_ALIAS
from .tensor.random import randperm
# from .tensor.random import rand #DEFINE_ALIAS
from .tensor.random import randint #DEFINE_ALIAS
# from .tensor.math import abs #DEFINE_ALIAS
# from .tensor.math import acos #DEFINE_ALIAS
# from .tensor.math import asin #DEFINE_ALIAS
from .tensor.math import atan #DEFINE_ALIAS
# from .tensor.math import ceil #DEFINE_ALIAS
# from .tensor.math import cos #DEFINE_ALIAS
# from .tensor.math import cumsum #DEFINE_ALIAS
# from .tensor.math import elementwise_add #DEFINE_ALIAS
# from .tensor.math import elementwise_div #DEFINE_ALIAS
# from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
# from .tensor.math import elementwise_max #DEFINE_ALIAS
# from .tensor.math import elementwise_min #DEFINE_ALIAS
# from .tensor.math import elementwise_mod #DEFINE_ALIAS
# from .tensor.math import elementwise_mul #DEFINE_ALIAS
# from .tensor.math import elementwise_pow #DEFINE_ALIAS
# from .tensor.math import elementwise_sub #DEFINE_ALIAS
# from .tensor.math import exp #DEFINE_ALIAS
# from .tensor.math import floor #DEFINE_ALIAS
# from .tensor.math import increment #DEFINE_ALIAS
# from .tensor.math import log #DEFINE_ALIAS
from .tensor.math import mul #DEFINE_ALIAS
# from .tensor.math import multiplex #DEFINE_ALIAS
from .tensor.math import pow #DEFINE_ALIAS
# from .tensor.math import reciprocal #DEFINE_ALIAS
# from .tensor.math import reduce_max #DEFINE_ALIAS
# from .tensor.math import reduce_min #DEFINE_ALIAS
# from .tensor.math import reduce_prod #DEFINE_ALIAS
# from .tensor.math import reduce_sum #DEFINE_ALIAS
# from .tensor.math import round #DEFINE_ALIAS
# from .tensor.math import rsqrt #DEFINE_ALIAS
# from .tensor.math import scale #DEFINE_ALIAS
# from .tensor.math import sign #DEFINE_ALIAS
from .tensor.math import sin #DEFINE_ALIAS
from .tensor.math import sqrt #DEFINE_ALIAS
# from .tensor.math import square #DEFINE_ALIAS
# from .tensor.math import stanh #DEFINE_ALIAS
from .tensor.math import sum #DEFINE_ALIAS
# from .tensor.math import sums #DEFINE_ALIAS
from .tensor.math import tanh #DEFINE_ALIAS
from .tensor.math import elementwise_sum #DEFINE_ALIAS
from .tensor.math import max #DEFINE_ALIAS
from .tensor.math import min #DEFINE_ALIAS
from .tensor.math import mm #DEFINE_ALIAS
from .tensor.math import div #DEFINE_ALIAS
from .tensor.math import add #DEFINE_ALIAS
# from .tensor.math import atan #DEFINE_ALIAS
from .tensor.math import logsumexp #DEFINE_ALIAS
# from .tensor.math import inverse #DEFINE_ALIAS
from .tensor.math import log1p #DEFINE_ALIAS
# from .tensor.math import erf #DEFINE_ALIAS
from .tensor.math import addcmul #DEFINE_ALIAS
from .tensor.math import addmm #DEFINE_ALIAS
from .tensor.math import clamp #DEFINE_ALIAS
# from .tensor.attribute import rank #DEFINE_ALIAS
# from .tensor.attribute import shape #DEFINE_ALIAS
# from .tensor.io import save #DEFINE_ALIAS
# from .tensor.io import load #DEFINE_ALIAS
from .tensor.linalg import matmul #DEFINE_ALIAS
from .tensor.linalg import dot #DEFINE_ALIAS
from .tensor.linalg import bmm #DEFINE_ALIAS
# from .tensor.linalg import einsum #DEFINE_ALIAS
from .tensor.linalg import norm #DEFINE_ALIAS
# from .tensor.linalg import transpose #DEFINE_ALIAS
from .tensor.linalg import dist #DEFINE_ALIAS
from .tensor.linalg import t #DEFINE_ALIAS
from .tensor.linalg import cross #DEFINE_ALIAS
# from .tensor.linalg import cholesky #DEFINE_ALIAS
# from .tensor.linalg import tensordot #DEFINE_ALIAS
# from .tensor.manipulation import cast #DEFINE_ALIAS
# from .tensor.manipulation import concat #DEFINE_ALIAS
# from .tensor.manipulation import expand #DEFINE_ALIAS
# from .tensor.manipulation import expand_as #DEFINE_ALIAS
# from .tensor.manipulation import flatten #DEFINE_ALIAS
from .tensor.manipulation import gather #DEFINE_ALIAS
# from .tensor.manipulation import gather_nd #DEFINE_ALIAS
# from .tensor.manipulation import reshape #DEFINE_ALIAS
# from .tensor.manipulation import reverse #DEFINE_ALIAS
# from .tensor.manipulation import scatter #DEFINE_ALIAS
# from .tensor.manipulation import scatter_nd_add #DEFINE_ALIAS
# from .tensor.manipulation import scatter_nd #DEFINE_ALIAS
# from .tensor.manipulation import shard_index #DEFINE_ALIAS
# from .tensor.manipulation import slice #DEFINE_ALIAS
from .tensor.manipulation import split #DEFINE_ALIAS
from .tensor.manipulation import squeeze #DEFINE_ALIAS
from .tensor.manipulation import stack #DEFINE_ALIAS
# from .tensor.manipulation import strided_slice #DEFINE_ALIAS
# from .tensor.manipulation import transpose #DEFINE_ALIAS
# from .tensor.manipulation import unique #DEFINE_ALIAS
# from .tensor.manipulation import unique_with_counts #DEFINE_ALIAS
from .tensor.manipulation import unsqueeze #DEFINE_ALIAS
# from .tensor.manipulation import unstack #DEFINE_ALIAS
from .tensor.manipulation import flip #DEFINE_ALIAS
# from .tensor.manipulation import unbind #DEFINE_ALIAS
from .tensor.manipulation import roll #DEFINE_ALIAS
from .tensor.search import argmax #DEFINE_ALIAS
# from .tensor.search import argmin #DEFINE_ALIAS
# from .tensor.search import argsort #DEFINE_ALIAS
# from .tensor.search import has_inf #DEFINE_ALIAS
# from .tensor.search import has_nan #DEFINE_ALIAS
# from .tensor.search import masked_select #DEFINE_ALIAS
# from .tensor.search import topk #DEFINE_ALIAS
from .tensor.search import where #DEFINE_ALIAS
from .tensor.search import index_select #DEFINE_ALIAS
from .tensor.search import index_sample #DEFINE_ALIAS
from .tensor.search import nonzero #DEFINE_ALIAS
from .tensor.search import sort #DEFINE_ALIAS
# from .framework.framework import set_default_dtype #DEFINE_ALIAS
# from .framework.framework import get_default_dtype #DEFINE_ALIAS
from .framework.random import manual_seed #DEFINE_ALIAS
......
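Each line tagged #DEFINE_ALIAS above re-exports a symbol at the package root, so e.g. paddle.eye resolves to paddle.tensor.creation.eye; commenting a line out (or deleting it, as this commit does for most of them) removes the corresponding top-level alias. A self-contained sketch of the mechanism using stand-in modules, not the Paddle source itself:

import types

# plays the role of paddle.tensor.creation
creation = types.ModuleType('creation')
creation.eye = lambda n: [[1 if i == j else 0 for j in range(n)] for i in range(n)]

# plays the role of the package root; this assignment is what
# "from .tensor.creation import eye  #DEFINE_ALIAS" effectively does
pkg = types.ModuleType('pkg')
pkg.eye = creation.eye

print(pkg.eye(2))  # [[1, 0], [0, 1]]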
This diff has been collapsed.
......@@ -73,7 +73,7 @@ class TestParameter(object):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.layers.data(name="X", shape=[1])
out = eval("paddle.%s(data, out=data)" % self.op_type)
out = eval("fluid.layers.%s(data, out=data)" % self.op_type)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(feed={"X": np.array([0.1])},
......@@ -83,7 +83,8 @@ class TestParameter(object):
def test_out_name(self):
with fluid.program_guard(fluid.Program()):
data = fluid.layers.data(name="X", shape=[1])
out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
out = eval("fluid.layers.%s(data, name='Y', out=data)" %
self.op_type)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(feed={"X": np.array([0.1])},
......@@ -94,7 +95,7 @@ class TestParameter(object):
with fluid.dygraph.guard():
np_x = np.array([0.1])
x = fluid.dygraph.to_variable(np_x)
z = eval("paddle.%s(x).numpy()" % self.op_type)
z = eval("fluid.layers.%s(x).numpy()" % self.op_type)
z_expected = eval("np.%s(np_x)" % self.op_type)
self.assertEqual(z, z_expected)
......@@ -136,7 +137,7 @@ class TestLogSigmoid(TestActivation):
self.check_grad(['X'], 'Out', max_relative_error=0.008)
class TestTanh(TestActivation, TestParameter):
class TestTanh(TestActivation):
def setUp(self):
self.op_type = "tanh"
self.init_dtype()
......@@ -152,37 +153,12 @@ class TestTanh(TestActivation, TestParameter):
self.check_grad(['X'], 'Out')
def init_dtype(self):
#TODO If dtype is float64, the output (Out) has diff at CPUPlace
# when using and not using inplace. Therefore, set dtype as float32
# for now.
#TODO If dtype is float64, the output (Out) has diff at CPUPlace
# when using and not using inplace. Therefore, set dtype as float32
# for now.
self.dtype = np.float32
class TestAtan(TestActivation, TestParameter):
def setUp(self):
self.op_type = "atan"
self.init_dtype()
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
out = np.arctan(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
def test_dygraph(self):
with fluid.dygraph.guard():
np_x = np.array([0.1])
x = fluid.dygraph.to_variable(np_x)
z = paddle.atan(x).numpy()
z_expected = np.arctan(np_x)
self.assertEqual(z, z_expected)
class TestTanhShrink(TestActivation):
def setUp(self):
self.op_type = "tanh_shrink"
......@@ -267,7 +243,7 @@ class TestSoftShrinkOpError(unittest.TestCase):
fluid.layers.softshrink(x_fp16)
class TestSqrt(TestActivation, TestParameter):
class TestSqrt(TestActivation):
def setUp(self):
self.op_type = "sqrt"
self.init_dtype()
......@@ -391,7 +367,7 @@ class TestAcos(TestActivation):
self.check_grad(['X'], 'Out')
class TestSin(TestActivation, TestParameter):
class TestSin(TestActivation):
def setUp(self):
self.op_type = "sin"
self.init_dtype()
......@@ -805,8 +781,8 @@ class TestLog1p(TestActivation):
append_batch_size=False,
dtype="float64")
out1 = paddle.log1p(data_x)
out2 = paddle.log1p(data_x, out=res_log1p)
out1 = fluid.layers.log1p(data_x)
out2 = fluid.layers.log1p(data_x, out=res_log1p)
exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res1, res_in = exe.run(fluid.default_main_program(),
......@@ -820,7 +796,7 @@ class TestLog1p(TestActivation):
with fluid.dygraph.guard():
np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
data_x = fluid.dygraph.to_variable(np_x)
z = paddle.log1p(data_x)
z = fluid.layers.log1p(data_x)
np_z = z.numpy()
z_expected = np.array(np.log1p(np_x))
np.testing.assert_allclose(np_z, z_expected)
......@@ -899,22 +875,14 @@ class TestPow_factor_tensor(TestActivation):
factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
out_1 = fluid.layers.pow(x, factor=factor_1)
out_2 = fluid.layers.pow(x, factor=factor_2)
out_3 = paddle.pow(x, factor_1, out=res)
out_4 = paddle.pow(x, factor_1, name='pow_res')
out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
out_6 = paddle.pow(x, factor_2)
self.assertEqual(('pow_res' in out_4.name), True)
exe = fluid.Executor(place=fluid.CPUPlace())
res_1, res_2, res_3, res, res_6 = exe.run(
fluid.default_main_program(),
feed={"x": input},
fetch_list=[out_1, out_2, out_3, res, out_6])
res_1, res_2 = exe.run(fluid.default_main_program(),
feed={"x": input},
fetch_list=[out_1, out_2])
assert np.array_equal(res_1, np.power(input, 2))
assert np.array_equal(res_2, np.power(input, 3))
assert np.array_equal(res_3, res)
assert np.array_equal(res_6, np.power(input, 3))
def test_error(self):
in1 = fluid.layers.data(
......@@ -1214,7 +1182,6 @@ create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
......
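The activation tests above share one pattern: build a numpy input, compute a numpy reference, and let OpTest compare outputs and gradients against it. The deleted TestAtan's reference computation, runnable on its own:

import numpy as np

x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
out = np.arctan(x)  # the reference the atan op is checked against
assert out.shape == x.shape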
......@@ -37,7 +37,7 @@ class TestAddcmulLayer(unittest.TestCase):
tensor1 = fluid.data(name="tensor1", dtype=self._dtype, shape=[100])
tensor2 = fluid.data(
name="tensor2", dtype=self._dtype, shape=[3, 100])
out = paddle.addcmul(input, tensor1, tensor2, value)
out = fluid.layers.addcmul(input, tensor1, tensor2, value)
exe = fluid.Executor(self._place)
return exe.run(feed={
......@@ -53,7 +53,7 @@ class TestAddcmulLayer(unittest.TestCase):
input = fluid.dygraph.to_variable(self.input)
tensor1 = fluid.dygraph.to_variable(self.tensor1)
tensor2 = fluid.dygraph.to_variable(self.tensor2)
out = paddle.addcmul(input, tensor1, tensor2, value)
out = fluid.layers.addcmul(input, tensor1, tensor2, value)
return out.numpy()
def numpy(self, value=1.0):
......@@ -85,7 +85,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32')
tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = fluid.layers.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast0(self):
......@@ -95,7 +95,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = fluid.layers.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast1(self):
......@@ -105,7 +105,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[4, 100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = fluid.layers.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape)
def test_addcmul_with_broadcast2(self):
......@@ -115,7 +115,7 @@ class TestAddcmul(unittest.TestCase):
tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = fluid.layers.addcmul(input, tensor1, tensor2)
self.assertEqual(out.shape, input.shape)
def test_addcmul_has_out(self):
......@@ -126,7 +126,7 @@ class TestAddcmul(unittest.TestCase):
tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
out = fluid.data(name='out', shape=[4, 100], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2, out=out)
out = fluid.layers.addcmul(input, tensor1, tensor2, out=out)
self.assertEqual(out.shape, input.shape)
......@@ -140,7 +140,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = fluid.layers.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_input)
......@@ -152,7 +152,7 @@ class InvalidInputTest(unittest.TestCase):
tensor1 = [20, 20]
tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2)
out = fluid.layers.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_tensor1)
......@@ -164,7 +164,7 @@ class InvalidInputTest(unittest.TestCase):
tensor1 = fluid.data(
name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = [20, 20]
out = paddle.addcmul(input, tensor1, tensor2)
out = fluid.layers.addcmul(input, tensor1, tensor2)
self.assertRaises(TypeError, test_invalid_tensor2)
......@@ -177,7 +177,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='float32')
tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='float32')
out = paddle.addcmul(input, tensor1, tensor2, value=1)
out = fluid.layers.addcmul(input, tensor1, tensor2, value=1)
self.assertRaises(TypeError, test_invalid_value_int)
......@@ -189,7 +189,7 @@ class InvalidInputTest(unittest.TestCase):
name='tensor1', shape=[20, 20], dtype='int32')
tensor2 = fluid.data(
name='tensor2', shape=[20, 20], dtype='int32')
out = paddle.addcmul(input, tensor1, tensor2, value=1.0)
out = fluid.layers.addcmul(input, tensor1, tensor2, value=1.0)
self.assertRaises(TypeError, test_invalid_value_float)
......
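For reference while reading the cases above: addcmul computes input + value * tensor1 * tensor2 with broadcasting, which is my reading of the op from these tests rather than a quote of its implementation. A numpy-only restatement:

import numpy as np

inp = np.random.rand(3, 100)
t1 = np.random.rand(3, 100)
t2 = np.random.rand(100)      # broadcasts over the first axis
ref = inp + 2.0 * t1 * t2     # the value=2.0 case
assert ref.shape == inp.shape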
......@@ -69,12 +69,12 @@ class TestAddMMOpError(unittest.TestCase):
np.array([[-1]]), [[1]], fluid.CPUPlace())
x2 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, paddle.addmm, input, x1, x2)
self.assertRaises(TypeError, fluid.layers.addmm, input, x1, x2)
# The input dtype of mul_op must be float32 or float64.
input = fluid.layers.data(name='input', shape=[4], dtype="int32")
x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32")
x4 = fluid.layers.data(name='x4', shape=[4], dtype="int32")
self.assertRaises(TypeError, paddle.addmm, input, x3, x4)
self.assertRaises(TypeError, fluid.layers.addmm, input, x3, x4)
class TestAddMMOp2(TestAddMMOp):
......@@ -143,7 +143,7 @@ class TestAddMMOp4(unittest.TestCase):
input = fluid.dygraph.to_variable(np_input)
x = fluid.dygraph.to_variable(np_x)
y = fluid.dygraph.to_variable(np_y)
out = paddle.tensor.addmm(input, x, y)
out = fluid.layers.addmm(input, x, y)
assert np.allclose(np_input + np.dot(np_x, np_y), out.numpy())
......
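The dygraph test above pins addmm's semantics down exactly: out = input + x @ y. The same reference in plain numpy:

import numpy as np

np_input = np.random.rand(3, 3)
np_x = np.random.rand(3, 4)
np_y = np.random.rand(4, 3)
ref = np_input + np.dot(np_x, np_y)  # what the test asserts addmm returns
assert ref.shape == (3, 3)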
......@@ -201,107 +201,5 @@ class BaseTestComplex2_2(OpTest):
}
class APT_ArgMaxTest(unittest.TestCase):
def test_output_result(self):
with fluid.program_guard(fluid.Program()):
data1 = fluid.data(name="X", shape=[3, 4], dtype="float32")
data2 = fluid.data(name="Y", shape=[3], dtype="int64")
out = paddle.argmax(input=data1, out=data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(
feed={"X": np.random.rand(3, 4).astype("float32")},
fetch_list=[data2, out])
self.assertEqual((result[0] == result[1]).all(), True)
def test_basic(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[3, 4], dtype="float32")
out = paddle.argmax(input=data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = np.random.rand(3, 4).astype("float32")
expected_result = np.argmax(np_input, axis=1)
result, = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[3, 4], dtype="float32")
out = paddle.argmax(input=data, axis=0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = np.random.rand(3, 4).astype("float32")
expected_result = np.argmax(np_input, axis=0)
result = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[3, 4], dtype="float32")
out = paddle.argmax(input=data, dtype="int32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = np.random.rand(3, 4).astype("float32")
expected_result = np.argmax(np_input, axis=1).astype(np.int32)
result = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data1 = fluid.data(name="X", shape=[3, 4], dtype="float32")
data2 = fluid.data(name="Y", shape=[3], dtype="int64")
out = paddle.argmax(input=data, out=data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(
feed={"X": np.random.rand(3, 4).astype("float32")},
fetch_list=[data2, out])
self.assertEqual((result[0] == result[1]).all(), True)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[100], dtype="float32")
y_1 = paddle.argmax(x, name='arg_max_res')
self.assertEqual(('arg_max_res' in y_1.name), True)
def test_errors(self):
def test_dtype1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.argmax(data, dtype="float32")
self.assertRaises(TypeError, test_dtype1)
def test_dtype2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float64")
paddle.argmax(data, dtype="float32")
self.assertRaises(TypeError, test_dtype2)
class TestArgMinMaxOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
def test_argmax_x_type():
x1 = [1, 2, 3]
output = fluid.layers.argmax(x=x1)
self.assertRaises(TypeError, test_argmax_x_type)
def test_argmin_x_type():
x2 = [1, 2, 3]
output = fluid.layers.argmin(x=x2)
self.assertRaises(TypeError, test_argmin_x_type)
if __name__ == '__main__':
unittest.main()
......@@ -321,36 +321,6 @@ class TestArgsortOpDescendingAxisNeg2GPU(TestArgsortOpAxisNeg2GPU):
self.descending = True
class TestSortOnCPU(TestArgsortOpCPU):
def init_place(self):
self.place = core.CPUPlace()
def test_out(self):
self.init_place()
with fluid.program_guard(fluid.Program()):
input = fluid.data(name="input", shape=[2, 3, 4], dtype="float32")
res = fluid.data(name="output", shape=[2, 3, 4], dtype="float32")
output = paddle.tensor.sort(input=input, out=res)
exe = fluid.Executor(self.place)
data = np.array(
[[[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]],
[[5, 2, 4, 2], [4, 7, 7, 9], [1, 7, 0, 6]]],
dtype='float32')
result = exe.run(feed={'input': data}, fetch_list=[res, output[0]])
self.assertEqual((result[0] == result[1]).all(), True)
class TestSortOnGPU(TestSortOnCPU):
def init_place(self):
if core.is_compiled_with_cuda():
self.place = core.CUDAPlace(0)
else:
self.place = core.CPUPlace()
class TestArgsortErrorOnCPU(unittest.TestCase):
def init_place(self):
self.place = core.CPUPlace()
......
......@@ -21,7 +21,7 @@ import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle
from paddle.fluid import Program, program_guard
from paddle.nn.functional import interpolate
from paddle.fluid.layers import interpolate
def cubic_1(x, a):
......
......@@ -19,7 +19,6 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.tensor as tensor
from paddle.fluid import Program, program_guard
......@@ -46,7 +45,7 @@ class API_TestBmm(unittest.TestCase):
'data1', shape=[-1, 3, 4], dtype='float64')
data2 = fluid.layers.data(
'data2', shape=[-1, 4, 5], dtype='float64')
result_bmm = paddle.bmm(data1, data2)
result_bmm = fluid.layers.bmm(data1, data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([10, 3, 4]).astype('float64')
......@@ -67,7 +66,7 @@ class API_TestDygraphBmm(unittest.TestCase):
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input1)
y = fluid.dygraph.to_variable(input2)
out = paddle.bmm(x, y)
out = fluid.layers.bmm(x, y)
out_np = out.numpy()
expected_result = np.matmul(input1, input2)
self.assertTrue(np.allclose(expected_result, out_np))
......
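bmm above is a batched matrix multiply; the tests use np.matmul as the reference, which for these shapes means one 3x4 @ 4x5 product per batch entry:

import numpy as np

a = np.random.random([10, 3, 4]).astype('float64')
b = np.random.random([10, 4, 5]).astype('float64')
ref = np.matmul(a, b)
assert ref.shape == (10, 3, 5)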
......@@ -13,7 +13,6 @@
# limitations under the License.
from __future__ import print_function
import paddle.tensor as tensor
import paddle.fluid as fluid
import numpy as np
import unittest
......@@ -31,12 +30,12 @@ class TestClampAPI(unittest.TestCase):
) else fluid.CPUPlace()
exe = fluid.Executor(place)
out_1 = tensor.clamp(images, min=min, max=max)
out_2 = tensor.clamp(images, min=0.2, max=0.9)
out_3 = tensor.clamp(images, min=0.3)
out_4 = tensor.clamp(images, max=0.7)
out_5 = tensor.clamp(images, min=min)
out_6 = tensor.clamp(images, max=max)
out_1 = fluid.layers.clamp(images, min=min, max=max)
out_2 = fluid.layers.clamp(images, min=0.2, max=0.9)
out_3 = fluid.layers.clamp(images, min=0.3)
out_4 = fluid.layers.clamp(images, max=0.7)
out_5 = fluid.layers.clamp(images, min=min)
out_6 = fluid.layers.clamp(images, max=max)
res1, res2, res3, res4, res5, res6 = exe.run(
fluid.default_main_program(),
......@@ -59,8 +58,8 @@ class TestClampError(unittest.TestCase):
def test_errors(self):
x1 = fluid.layers.data(name='x1', shape=[1], dtype="int16")
x2 = fluid.layers.data(name='x2', shape=[1], dtype="int8")
self.assertRaises(TypeError, tensor.clamp, x=x1, min=0.2, max=0.8)
self.assertRaises(TypeError, tensor.clamp, x=x2, min=0.2, max=0.8)
self.assertRaises(TypeError, fluid.layers.clamp, x=x1, min=0.2, max=0.8)
self.assertRaises(TypeError, fluid.layers.clamp, x=x2, min=0.2, max=0.8)
if __name__ == '__main__':
......
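clamp as exercised above behaves like numpy's clip, including the one-sided min-only and max-only forms; that equivalence is my gloss, not something this diff asserts:

import numpy as np

images = np.random.rand(2, 3).astype('float32')
both = np.clip(images, 0.2, 0.9)      # clamp(images, min=0.2, max=0.9)
lo_only = np.clip(images, 0.3, None)  # clamp(images, min=0.3)
hi_only = np.clip(images, None, 0.7)  # clamp(images, max=0.7)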
......@@ -143,14 +143,5 @@ def np_broadcast_equal(_x, _y):
for args in broadcast_args:
create_test_broadcast_class('equal_reduce', args, np_broadcast_equal)
class TestEqualReduceAPI(unittest.TestCase):
def test_name(self):
x = fluid.layers.assign(np.array([3, 4], dtype="int32"))
y = fluid.layers.assign(np.array([3, 4], dtype="int32"))
out = paddle.equal(x, y, name='equal_res')
assert 'equal_res' in out.name
if __name__ == '__main__':
unittest.main()
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle import fluid, tensor
from paddle import fluid
import paddle.complex as cpx
import paddle.fluid.dygraph as dg
import numpy as np
......
......@@ -79,7 +79,7 @@ class TestCrossAPI(unittest.TestCase):
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1, 3])
y = fluid.layers.data(name='y', shape=[-1, 3])
z = paddle.cross(x, y, dim=1)
z = fluid.layers.cross(x, y, dim=1)
exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': self.data_x,
'y': self.data_y},
......@@ -93,7 +93,7 @@ class TestCrossAPI(unittest.TestCase):
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1, 3])
y = fluid.layers.data(name='y', shape=[-1, 3])
z = paddle.cross(x, y)
z = fluid.layers.cross(x, y)
exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': self.data_x,
'y': self.data_y},
......@@ -109,7 +109,7 @@ class TestCrossAPI(unittest.TestCase):
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(self.data_x)
y = fluid.dygraph.to_variable(self.data_y)
z = paddle.cross(x, y)
z = fluid.layers.cross(x, y)
np_z = z.numpy()
expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0],
[-1.0, -1.0, -1.0]])
......@@ -119,7 +119,7 @@ class TestCrossAPI(unittest.TestCase):
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(self.data_x)
y = fluid.dygraph.to_variable(self.data_y)
z = paddle.cross(x, y, dim=1)
z = fluid.layers.cross(x, y, dim=1)
np_z = z.numpy()
expect_out = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
......
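The expected arrays in the cross tests above pin the elided inputs down to x = [[1,1,1],[2,2,2],[3,3,3]] and y = ones((3,3)); numpy reproduces both cases, with dim=None meaning the first axis of length 3:

import numpy as np

x = np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]])
y = np.ones((3, 3))
print(np.cross(x, y, axis=0))  # [[-1,-1,-1],[2,2,2],[-1,-1,-1]], the dim=None case
print(np.cross(x, y, axis=1))  # all zeros: each row is parallel to [1,1,1]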
......@@ -17,7 +17,6 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.core as core
......@@ -52,8 +51,8 @@ class TestDiagEmbedAPICase(unittest.TestCase):
def test_case1(self):
diag_embed = np.random.randn(2, 3, 4).astype('float32')
data1 = fluid.data(name='data1', shape=[2, 3, 4], dtype='float32')
out1 = F.diag_embed(data1)
out2 = F.diag_embed(data1, offset=1, dim1=-2, dim2=3)
out1 = fluid.layers.diag_embed(data1)
out2 = fluid.layers.diag_embed(data1, offset=1, dim1=-2, dim2=3)
place = core.CPUPlace()
exe = fluid.Executor(place)
......
......@@ -150,7 +150,7 @@ class TestDistAPI(unittest.TestCase):
p = 2
x_i = np.random.random((2, 3, 4, 5)).astype("float64")
y_i = np.random.random((3, 1, 5)).astype("float64")
result = paddle.dist(x, y, p)
result = fluid.layers.dist(x, y, p)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
......
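dist(x, y, p) above reads as the p-norm of the broadcast elementwise difference; the assertion itself is elided from this hunk, so treat the following numpy reference as an interpretation:

import numpy as np

x_i = np.random.random((2, 3, 4, 5)).astype('float64')
y_i = np.random.random((3, 1, 5)).astype('float64')
p = 2
ref = np.linalg.norm((x_i - y_i).ravel(), ord=p)  # broadcast, subtract, p-norm
print(ref)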
......@@ -73,15 +73,15 @@ class TestDotOpError(unittest.TestCase):
# float16 only can be set on GPU place
x1 = fluid.layers.data(name='x1', shape=[120], dtype="uint8")
y1 = fluid.layers.data(name='y1', shape=[120], dtype="uint8")
self.assertRaises(Exception, paddle.dot, x1, y1)
self.assertRaises(Exception, fluid.layers.dot, x1, y1)
x2 = fluid.layers.data(name='x2', shape=[2, 3], dtype="float32")
y2 = fluid.layers.data(name='y2', shape=[2, 3], dtype="float32")
self.assertRaises(Exception, paddle.dot, x2, y2)
self.assertRaises(Exception, fluid.layers.dot, x2, y2)
x3 = fluid.layers.data(name='x3', shape=[3], dtype="float32")
y3 = fluid.layers.data(name='y3', shape=[2, 3], dtype="float32")
self.assertRaises(Exception, paddle.dot, x2, y3)
self.assertRaises(Exception, fluid.layers.dot, x2, y3)
class TestDygraph(unittest.TestCase):
......@@ -90,7 +90,7 @@ class TestDygraph(unittest.TestCase):
x1 = fluid.dygraph.to_variable(np.array([1, 3]).astype(np.float32))
y1 = fluid.dygraph.to_variable(np.array([2, 5]).astype(np.float32))
self.assertTrue(
np.allclose(paddle.dot(x1, y1).numpy(), np.array([17])))
np.allclose(fluid.layers.dot(x1, y1).numpy(), np.array([17])))
x1 = fluid.dygraph.to_variable(
np.array([[1, 3], [3, 5]]).astype(np.float32))
......@@ -98,7 +98,7 @@ class TestDygraph(unittest.TestCase):
np.array([[2, 5], [6, 8]]).astype(np.float32))
self.assertTrue(
np.array_equal(
paddle.dot(x1, y1).numpy(), np.array([[17], [58]])))
fluid.layers.dot(x1, y1).numpy(), np.array([[17], [58]])))
if __name__ == '__main__':
......
......@@ -381,104 +381,5 @@ class TestElementwiseAddOpError(unittest.TestCase):
self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)
class TestAddOp(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
res = fluid.data(name="output", shape=[3], dtype="float32")
y_1 = paddle.add(x, y, out=res)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_out_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
res = fluid.data(name="output", shape=[3], dtype="float32")
y_1 = paddle.add(x, y, out=res)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[2, 3], dtype='float32')
y_1 = paddle.add(x, y, name='add_res')
self.assertEqual(('add_res' in y_1.name), True)
def test_alpha(self):
with fluid.program_guard(fluid.Program()):
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = paddle.add(x, y, alpha=10)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
z_expected = np.array([12., 53., 24.])
self.assertEqual((z_value == z_expected).all(), True)
def test_alpha_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
with fluid.program_guard(fluid.Program()):
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = paddle.add(x, y, alpha=-0.5)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
z_expected = np.array([1.5, 0.5, 3.])
self.assertEqual((z_value == z_expected).all(), True)
def test_dygraph(self):
with fluid.dygraph.guard():
np_x = np.array([2, 3, 4]).astype('float64')
np_y = np.array([1, 5, 2]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
y = fluid.dygraph.to_variable(np_y)
z = paddle.add(x, y, alpha=-0.5)
np_z = z.numpy()
z_expected = np.array([1.5, 0.5, 3.])
self.assertEqual((np_z == z_expected).all(), True)
if __name__ == '__main__':
unittest.main()
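The deleted test_alpha cases document what the removed alpha argument computed: z = x + alpha * y. The expected values above, restated in numpy:

import numpy as np

x = np.array([2, 3, 4], dtype='float32')
y = np.array([1, 5, 2], dtype='float32')
assert (x + 10 * y == np.array([12., 53., 24.], dtype='float32')).all()   # alpha=10
assert (x + -0.5 * y == np.array([1.5, 0.5, 3.], dtype='float32')).all()  # alpha=-0.5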
......@@ -227,64 +227,5 @@ class TestElementwiseDivOpFp16(ElementwiseDivOp):
['X'], 'Out', max_relative_error=1, no_grad_set=set('Y'))
class TestDivOp(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
res = fluid.data(name="output", shape=[3], dtype="float32")
y_1 = paddle.div(x, y, out=res)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_out_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
res = fluid.data(name="output", shape=[3], dtype="float32")
y_1 = paddle.div(x, y, out=res)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[2, 3], dtype='float32')
y_1 = paddle.div(x, y, name='div_res')
self.assertEqual(('div_res' in y_1.name), True)
def test_dygraph(self):
with fluid.dygraph.guard():
np_x = np.array([2, 3, 4]).astype('float64')
np_y = np.array([1, 5, 2]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
y = fluid.dygraph.to_variable(np_y)
z = paddle.div(x, y)
np_z = z.numpy()
z_expected = np.array([2., 0.6, 2.])
self.assertEqual((np_z == z_expected).all(), True)
if __name__ == '__main__':
unittest.main()
......@@ -75,7 +75,7 @@ class TestEyeOp2(OpTest):
class API_TestTensorEye(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = paddle.eye(10)
data = fluid.layers.eye(10)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[data])
......@@ -83,7 +83,7 @@ class API_TestTensorEye(unittest.TestCase):
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = paddle.eye(10, num_columns=7, dtype="float64")
data = fluid.layers.eye(10, num_columns=7, dtype="float64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[data])
......@@ -91,7 +91,7 @@ class API_TestTensorEye(unittest.TestCase):
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = paddle.eye(10, dtype="int64")
data = fluid.layers.eye(10, dtype="int64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[data])
......@@ -102,12 +102,12 @@ class API_TestTensorEye(unittest.TestCase):
with fluid.program_guard(fluid.Program()):
def test_num_rows_type_check():
paddle.eye(-1, dtype="int64")
fluid.layers.eye(-1, dtype="int64")
self.assertRaises(TypeError, test_num_rows_type_check)
def test_num_columns_type_check():
paddle.eye(10, num_columns=5.2, dtype="int64")
fluid.layers.eye(10, num_columns=5.2, dtype="int64")
self.assertRaises(TypeError, test_num_columns_type_check)
......
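The expected_result values elided from the eye hunks above are presumably plain numpy identity matrices; the three cases would correspond to:

import numpy as np

a = np.eye(10)                      # eye(10)
b = np.eye(10, 7, dtype='float64')  # eye(10, num_columns=7, dtype="float64")
c = np.eye(10, dtype='int64')       # eye(10, dtype="int64")
assert a.shape == (10, 10) and b.shape == (10, 7)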
......@@ -153,130 +153,5 @@ class TestFillAnyLikeOpError(unittest.TestCase):
dtype='int16')
class ApiOnesLikeTest(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
ones = paddle.ones_like(data, device="cpu")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[ones])
expected_result = np.ones(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
ones = paddle.ones_like(data, device="cpu", dtype="float32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[ones])
expected_result = np.ones(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
ones = paddle.ones_like(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[ones])
expected_result = np.ones(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
class ApiZerosLikeTest(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
zeros = paddle.zeros_like(data, device="cpu")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
zeros = paddle.zeros_like(data, device="cpu", dtype="float32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
zeros = paddle.zeros_like(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
class TestOnesZerosError(unittest.TestCase):
def test_errors(self):
def test_device_error1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.ones_like(data, device="opu")
self.assertRaises(ValueError, test_device_error1)
def test_device_error2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.ones_like(data, dtype="float")
self.assertRaises(ValueError, test_device_error2)
def test_device_error3():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.zeros_like(data, device="opu")
self.assertRaises(ValueError, test_device_error3)
def test_device_error4():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.zeros_like(data, dtype="float")
self.assertRaises(ValueError, test_device_error4)
def test_ones_like_type_error():
with fluid.program_guard(fluid.Program(), fluid.Program()):
fluid.layers.ones_like([10], dtype="float")
self.assertRaises(TypeError, test_ones_like_type_error)
def test_ones_like_dtype_error():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float16")
fluid.layers.ones_like(data, dtype="float32")
self.assertRaises(TypeError, test_ones_like_dtype_error)
def test_ones_like_out_type_error():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
fluid.layers.ones_like(data, dtype="float32", out=[10])
self.assertRaises(TypeError, test_ones_like_out_type_error)
def test_ones_like_out_dtype_error():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
out = fluid.data(name="out", shape=[10], dtype="float16")
fluid.layers.ones_like(data, dtype="float32", out=out)
self.assertRaises(TypeError, test_ones_like_out_dtype_error)
if __name__ == "__main__":
unittest.main()
......@@ -83,28 +83,6 @@ class TestFillConstantOp4(OpTest):
self.check_output()
class TestFillConstantOp5(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[1], dtype="float32")
out = paddle.zeros(shape=[1], out=data, dtype="float32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(feed={"X": np.array(
[0.1], dtype="float32")},
fetch_list=[data, out])
self.assertEqual(result[0], result[1])
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[1], dtype="float32")
out = paddle.ones(shape=[1], out=data, dtype="float32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(feed={"X": np.array(
[0.1], dtype="float32")},
fetch_list=[data, out])
self.assertEqual(result[0], result[1])
class TestFillConstantOpWithSelectedRows(unittest.TestCase):
def check_with_place(self, place):
scope = core.Scope()
......@@ -389,98 +367,5 @@ class TestFillConstantOpError(unittest.TestCase):
self.assertRaises(TypeError, test_shape_tensor_list_dtype)
class ApiZerosTest(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
zeros = paddle.zeros(shape=[10], dtype="float64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
zeros = paddle.zeros(shape=[10], dtype="int64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
zeros = paddle.zeros(shape=[10], dtype="int64", device="cpu")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
class ApiOnesTest(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=[10], dtype="float64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=[10], dtype="int64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=[10], dtype="int64", device="cpu")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
class ApiOnesZerosError(unittest.TestCase):
def test_errors(self):
def test_error1():
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=10, dtype="int64", device="opu")
self.assertRaises(ValueError, test_error1)
def test_error2():
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=10, dtype="int64", device="opu")
self.assertRaises(ValueError, test_error2)
def test_error3():
with fluid.program_guard(fluid.Program()):
ones = fluid.layers.ones(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error3)
def test_error4():
with fluid.program_guard(fluid.Program()):
ones = fluid.layers.ones(shape=[10], dtype="int8")
self.assertRaises(TypeError, test_error4)
def test_error5():
with fluid.program_guard(fluid.Program()):
ones = fluid.layers.zeros(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error5)
def test_error6():
with fluid.program_guard(fluid.Program()):
ones = fluid.layers.zeros(shape=[10], dtype="int8")
self.assertRaises(TypeError, test_error6)
if __name__ == "__main__":
unittest.main()
......@@ -113,7 +113,7 @@ class API_TestGather(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float64')
index = fluid.layers.data('index', shape=[-1, 1], dtype='float64')
out = paddle.gather(data1, index)
out = fluid.layers.gather(data1, index)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input = np.array([[1, 2], [3, 4], [5, 6]])
......
......@@ -96,33 +96,5 @@ class TestCase4(TestIndexSampleOp):
self.index_type = "int64"
class TestIndexSampleShape(unittest.TestCase):
def test_shape(self):
import paddle.fluid as fluid
import paddle
# create x value
x_shape = (2, 5)
x_type = "float64"
x_np = np.random.random(x_shape).astype(x_type)
# create index value
index_shape = (2, 3)
index_type = "int32"
index_np = np.random.randint(
low=0, high=x_shape[1], size=index_shape).astype(index_type)
x = fluid.data(name='x', shape=[-1, 5], dtype='float64')
index = fluid.data(name='index', shape=[-1, 3], dtype='int32')
output = paddle.index_sample(x=x, index=index)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
feed = {'x': x_np, 'index': index_np}
res = exe.run(feed=feed, fetch_list=[output])
if __name__ == "__main__":
unittest.main()
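index_sample above is a per-row gather, out[i][j] = x[i][index[i][j]]; in numpy that is take_along_axis (my equivalence, not stated in the diff):

import numpy as np

x_np = np.random.random((2, 5)).astype('float64')
index_np = np.random.randint(low=0, high=5, size=(2, 3))
ref = np.take_along_axis(x_np, index_np, axis=1)  # out[i, j] = x[i, index[i, j]]
assert ref.shape == (2, 3)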
......@@ -83,7 +83,7 @@ class TestIndexSelectAPI(unittest.TestCase):
x = fluid.layers.data(name='x', shape=[-1, 4])
index = fluid.layers.data(
name='index', shape=[3], dtype='int32', append_batch_size=False)
z = paddle.index_select(x, index, dim=1)
z = fluid.layers.index_select(x, index, dim=1)
exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': self.data_x,
'index': self.data_index},
......@@ -98,7 +98,7 @@ class TestIndexSelectAPI(unittest.TestCase):
x = fluid.layers.data(name='x', shape=[-1, 4])
index = fluid.layers.data(
name='index', shape=[3], dtype='int32', append_batch_size=False)
z = paddle.index_select(x, index)
z = fluid.layers.index_select(x, index)
exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': self.data_x,
'index': self.data_index},
......@@ -114,7 +114,7 @@ class TestIndexSelectAPI(unittest.TestCase):
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(self.data_x)
index = fluid.dygraph.to_variable(self.data_index)
z = paddle.index_select(x, index)
z = fluid.layers.index_select(x, index)
np_z = z.numpy()
expect_out = np.array(
[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]])
......@@ -124,7 +124,7 @@ class TestIndexSelectAPI(unittest.TestCase):
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(self.data_x)
index = fluid.dygraph.to_variable(self.data_index)
z = paddle.index_select(x, index, dim=1)
z = fluid.layers.index_select(x, index, dim=1)
np_z = z.numpy()
expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0],
[9.0, 10.0, 10.0]])
......
......@@ -3488,7 +3488,7 @@ class TestBook(LayerTest):
append_batch_size=False,
dtype='float32')
out = paddle.addmm(input=input, x=x, y=y)
out = fluid.layers.addmm(input=input, x=x, y=y)
return (out)
def test_retinanet_detection_output(self):
......
......@@ -70,68 +70,5 @@ class TestLinspaceOpNumOneCase(OpTest):
self.check_output()
class TestLinspaceAPI(unittest.TestCase):
def test_out(self):
with program_guard(fluid.Program()):
out_1 = fluid.data(name="out_1", shape=[5], dtype="float32")
out_2 = paddle.tensor.linspace(0, 10, 5, dtype='float32', out=out_1)
exe = fluid.Executor(place=fluid.CPUPlace())
ipt = {'out_1': np.random.random([5]).astype('float32')}
res_1, res_2 = exe.run(fluid.default_main_program(),
feed=ipt,
fetch_list=[out_1, out_2])
assert np.array_equal(res_1, res_2)
def test_name(self):
with fluid.program_guard(fluid.Program()):
out = paddle.linspace(
0, 10, 5, dtype='float32', name='linspace_res')
assert 'linspace_res' in out.name
class TestLinspaceOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# for ci coverage
# The device of fill_constant must be in 'cpu', 'gpu' or None
def test_device_value():
paddle.linspace(0, 10, 1, dtype="float32", device='xxxpu')
self.assertRaises(ValueError, test_device_value)
def test_start_type():
fluid.layers.linspace([0], 10, 1, dtype="float32")
self.assertRaises(TypeError, test_start_type)
def test_end_dtype():
fluid.layers.linspace(0, [10], 1, dtype="float32")
self.assertRaises(TypeError, test_end_dtype)
def test_step_dtype():
fluid.layers.linspace(0, 10, [0], dtype="float32")
self.assertRaises(TypeError, test_step_dtype)
def test_start_dtype():
start = fluid.data(shape=[1], type="int32", name="start")
fluid.layers.linspace(start, 10, 1, dtype="float32")
self.assertRaises(TypeError, test_start_dtype)
def test_end_dtype():
end = fluid.data(shape=[1], type="int32", name="end")
fluid.layers.linspace(0, end, 1, dtype="float32")
self.assertRaises(TypeError, test_end_dtype)
def test_step_dtype():
step = fluid.data(shape=[1], type="int32", name="step")
fluid.layers.linspace(0, 10, step, dtype="float32")
self.assertRaises(TypeError, test_step_dtype)
if __name__ == "__main__":
unittest.main()
......@@ -27,15 +27,15 @@ class TestLogSumOpError(unittest.TestCase):
with program_guard(Program(), Program()):
x1 = fluid.layers.data(name='x1', shape=[120], dtype="uint8")
self.assertRaises(Exception, paddle.logsumexp, x1)
self.assertRaises(Exception, fluid.layers.logsumexp, x1)
x2 = fluid.layers.data(name='x2', shape=[2, 3], dtype="int")
self.assertRaises(Exception, paddle.logsumexp, x2)
self.assertRaises(Exception, fluid.layers.logsumexp, x2)
x3 = fluid.layers.data(name='x3', shape=[3], dtype="float16")
self.assertRaises(Exception, paddle.logsumexp, x3)
self.assertRaises(Exception, fluid.layers.logsumexp, x3)
self.assertRaises(AssertionError, paddle.logsumexp, None)
self.assertRaises(AssertionError, fluid.layers.logsumexp, None)
class TestLogSumExpOp(unittest.TestCase):
......@@ -45,13 +45,14 @@ class TestLogSumExpOp(unittest.TestCase):
x = fluid.dygraph.to_variable(np_x)
self.assertTrue(
np.allclose(
paddle.logsumexp(x).numpy(), np.log(np.sum(np.exp(np_x)))))
fluid.layers.logsumexp(x).numpy(),
np.log(np.sum(np.exp(np_x)))))
np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
x = fluid.dygraph.to_variable(np_x)
self.assertTrue(
np.allclose(
paddle.logsumexp(
fluid.layers.logsumexp(
x, dim=[1, 2]).numpy(),
np.log(np.sum(np.exp(np_x), axis=(1, 2)))))
......@@ -59,7 +60,7 @@ class TestLogSumExpOp(unittest.TestCase):
x = fluid.dygraph.to_variable(np_x)
self.assertTrue(
np.allclose(
paddle.logsumexp(
fluid.layers.logsumexp(
x, dim=[2]).numpy(),
np.log(np.sum(np.exp(np_x), axis=(2)))))
......@@ -67,7 +68,7 @@ class TestLogSumExpOp(unittest.TestCase):
x = fluid.dygraph.to_variable(np_x)
self.assertTrue(
np.allclose(
paddle.logsumexp(
fluid.layers.logsumexp(
x, keepdim=True).numpy(),
np.log(np.sum(np.exp(np_x), keepdims=True))))
......@@ -76,7 +77,7 @@ class TestLogSumExpOp(unittest.TestCase):
helper = LayerHelper("test_logsumexp")
out = helper.create_variable(
type=x.type, name='out', dtype=x.dtype, persistable=False)
paddle.logsumexp(x, out=out)
fluid.layers.logsumexp(x, out=out)
self.assertTrue(
np.allclose(out.numpy(), np.log(np.sum(np.exp(np_x)))))
......
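The identity these logsumexp tests check is log(sum(exp(x))), optionally over a dim list and with keepdim. The numerically stable way to evaluate it, a standard trick rather than a quote of Paddle's kernel, subtracts the max first:

import numpy as np

x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
naive = np.log(np.sum(np.exp(x), axis=(1, 2)))
m = x.max(axis=(1, 2), keepdims=True)
stable = m.squeeze((1, 2)) + np.log(np.sum(np.exp(x - m), axis=(1, 2)))
assert np.allclose(naive, stable)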
......@@ -243,67 +243,6 @@ for dim in [4]:
})
class API_TestMm(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3, 2], dtype="float64")
y = fluid.data(name='y', shape=[2, 3], dtype='float64')
res = fluid.data(name="output", shape=[3, 3], dtype="float64")
y_1 = paddle.mm(x, y, out=res)
exe = fluid.Executor(fluid.CPUPlace())
data1 = np.random.rand(3, 2)
data2 = np.random.rand(2, 3)
np_res, expected_result = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertTrue(
np.allclose(
np.array(np_res), np.array(expected_result), atol=1e-5),
"two value is\
{}\n{}, check diff!".format(np_res, expected_result))
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2], dtype="float64")
y = fluid.data(name='y', shape=[2], dtype='float64')
res = fluid.data(name="output", shape=[1], dtype="float64")
result = paddle.mm(x, y)
exe = fluid.Executor(fluid.CPUPlace())
data1 = np.random.rand(2)
data2 = np.random.rand(2)
np_res = exe.run(feed={'x': data1, 'y': data2}, fetch_list=[result])
expected_result = np.matmul(
data1.reshape(1, 2), data2.reshape(2, 1))
self.assertTrue(
np.allclose(
np_res, expected_result, atol=1e-5),
"two value is\
{}\n{}, check diff!".format(np_res, expected_result))
def test_dygraph_with_out(self):
device = fluid.CPUPlace()
with fluid.dygraph.guard(device):
input_array1 = np.random.rand(3, 4).astype("float64")
input_array2 = np.random.rand(4, 3).astype("float64")
out_array = np.random.rand(3, 3).astype("float64")
data1 = fluid.dygraph.to_variable(input_array1)
data2 = fluid.dygraph.to_variable(input_array2)
paddle_out_holder = fluid.dygraph.to_variable(out_array)
out = paddle.mm(data1, data2, out=paddle_out_holder)
self.assertTrue(np.allclose(paddle_out_holder.numpy(), out.numpy()))
def test_dygraph_without_out(self):
device = fluid.CPUPlace()
with fluid.dygraph.guard(device):
input_array1 = np.random.rand(3, 4).astype("float64")
input_array2 = np.random.rand(4, 3).astype("float64")
data1 = fluid.dygraph.to_variable(input_array1)
data2 = fluid.dygraph.to_variable(input_array2)
out = paddle.mm(data1, data2)
expected_result = np.matmul(input_array1, input_array2)
self.assertTrue(np.allclose(expected_result, out.numpy()))
class Test_API_Matmul(unittest.TestCase):
def test_dygraph_without_out(self):
device = fluid.CPUPlace()
......@@ -312,41 +251,10 @@ class Test_API_Matmul(unittest.TestCase):
input_array2 = np.random.rand(4, 3).astype("float64")
data1 = fluid.dygraph.to_variable(input_array1)
data2 = fluid.dygraph.to_variable(input_array2)
out = paddle.matmul(data1, data2)
out = fluid.layers.matmul(data1, data2)
expected_result = np.matmul(input_array1, input_array2)
self.assertTrue(np.allclose(expected_result, out.numpy()))
class API_TestMmError(unittest.TestCase):
def test_errors(self):
def test_error1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data(name="data1", shape=[10, 2], dtype="float32")
data2 = fluid.data(name="data2", shape=[3, 10], dtype="float32")
paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error1)
def test_error2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data(
name="data1", shape=[-1, 10, 2], dtype="float32")
data2 = fluid.data(
name="data2", shape=[-1, 2, 10], dtype="float32")
paddle.mm(data1, data2)
test_error2()
def test_error3():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data(
name="data1", shape=[10, 10, 2], dtype="float32")
data2 = fluid.data(
name="data2", shape=[3, 2, 10], dtype="float32")
paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error3)
if __name__ == "__main__":
unittest.main()
from __future__ import print_function
......@@ -79,7 +79,7 @@ class TestMeshgridOp3(unittest.TestCase):
out_2 = np.broadcast_to(out_2, [100, 200])
exe = fluid.Executor(place=fluid.CPUPlace())
grid_x, grid_y = paddle.meshgrid([x, y])
grid_x, grid_y = fluid.layers.meshgrid([x, y])
res_1, res_2 = exe.run(fluid.default_main_program(),
feed={'x': input_1,
'y': input_2},
......@@ -95,7 +95,7 @@ class TestMeshgridOp4(unittest.TestCase):
def test_input_type():
x = fluid.data(shape=[200], dtype='float32', name='x2')
paddle.meshgrid(x)
fluid.layers.meshgrid(x)
self.assertRaises(TypeError, test_input_type)
......@@ -108,7 +108,7 @@ class TestMeshgridOp5(unittest.TestCase):
with fluid.dygraph.guard():
tensor_3 = fluid.dygraph.to_variable(input_3)
tensor_4 = fluid.dygraph.to_variable(input_4)
res_3, res_4 = paddle.meshgrid([tensor_3, tensor_4])
res_3, res_4 = fluid.layers.meshgrid([tensor_3, tensor_4])
assert np.array_equal(res_3.shape, [100, 200])
assert np.array_equal(res_4.shape, [100, 200])
......
......@@ -175,35 +175,5 @@ class TestFP16MulOp2(TestMulOp2):
no_grad_set=set('Y'))
class TestMulOpAttr(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[3, 2], dtype='float32')
res = fluid.data(name="output", shape=[2, 2], dtype="float32")
y_1 = paddle.mul(x, y, out=res)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data1 = np.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
data2 = np.array([[1, 2], [1, 2], [1, 2]], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[3, 2], dtype='float32')
res = fluid.data(name="output", shape=[2, 2], dtype="float32")
y_1 = paddle.mul(x, y, name='mul_res')
y_2 = paddle.mul(x, y, out=res, name='mul_res')
self.assertEqual(('mul_res' in y_1.name), True)
if __name__ == "__main__":
unittest.main()
......@@ -27,7 +27,7 @@ class TestNonZeroAPI(unittest.TestCase):
data = np.array([[True, False], [False, True]])
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1, 2])
y = paddle.nonzero(x, as_tuple=True)
y = fluid.layers.nonzero(x, as_tuple=True)
self.assertEqual(type(y), tuple)
self.assertEqual(len(y), 2)
z = fluid.layers.concat(list(y), axis=1)
......@@ -42,7 +42,7 @@ class TestNonZeroAPI(unittest.TestCase):
data = np.array([True, True, False])
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1])
y = paddle.nonzero(x, as_tuple=True)
y = fluid.layers.nonzero(x, as_tuple=True)
self.assertEqual(type(y), tuple)
self.assertEqual(len(y), 1)
z = fluid.layers.concat(list(y), axis=1)
......@@ -57,7 +57,7 @@ class TestNonZeroAPI(unittest.TestCase):
data = np.array([[True, False], [False, True]])
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1, 2])
y = paddle.nonzero(x)
y = fluid.layers.nonzero(x)
exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': data},
fetch_list=[y.name],
......@@ -68,7 +68,7 @@ class TestNonZeroAPI(unittest.TestCase):
data = np.array([True, True, False])
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1])
y = paddle.nonzero(x)
y = fluid.layers.nonzero(x)
exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': data},
fetch_list=[y.name],
......
......@@ -118,93 +118,5 @@ class TestPnormOp2(TestPnormOp):
self.check_grad(['X'], 'Out')
def run_out(self, p, axis, shape_x, shape_y, dtype):
with fluid.program_guard(fluid.Program()):
data1 = fluid.data(name="X", shape=shape_x, dtype=dtype)
data2 = fluid.data(name="Y", shape=shape_y, dtype=dtype)
out = paddle.norm(input=data1, p=p, axis=axis, out=data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(feed={"X": np.random.rand(*shape_x).astype(dtype)},
fetch_list=[data2, out])
self.assertEqual((result[0] == result[1]).all(), True)
def run_fro(self, p, axis, shape_x, dtype):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=shape_x, dtype=dtype)
out = paddle.norm(input=data, p=p, axis=axis)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype)
expected_result = frobenius_norm(np_input, axis=axis)
result, = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True)
def run_pnorm(self, p, axis, shape_x, dtype):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=shape_x, dtype=dtype)
out = paddle.norm(input=data, p=p, axis=axis)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype)
expected_result = p_norm(np_input, porder=p, axis=axis).astype(dtype)
result, = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True)
class API_NormTest(unittest.TestCase):
def test_output_result(self):
run_out(self, p=2, axis=1, shape_x=[3, 4], shape_y=[3], dtype="float32")
run_out(
self,
p='fro',
axis=None,
shape_x=[3, 4],
shape_y=[1],
dtype="float32")
def test_basic(self):
run_fro(self, p='fro', axis=None, shape_x=[3, 3, 4], dtype="float32")
run_fro(self, p='fro', axis=[0, 1], shape_x=[3, 3, 4], dtype="float64")
run_pnorm(self, p=2, axis=None, shape_x=[3, 4], dtype="float32")
run_pnorm(self, p=2, axis=1, shape_x=[3, 4], dtype="float64")
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[10, 10], dtype="float32")
y_1 = paddle.norm(x, p='fro', name='frobenius_name')
y_2 = paddle.norm(x, p=2, name='pnorm_name')
self.assertEqual(('frobenius_name' in y_1.name), True)
self.assertEqual(('pnorm_name' in y_2.name), True)
def test_errors(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
def err_dtype(p, shape_x, xdtype, out=None):
data = fluid.data(shape=shape_x, dtype=xdtype)
paddle.norm(data, p=p, out=out)
self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "int64")
out = fluid.data(name="out", shape=[1], dtype="int64")
self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "float64",
out)
self.assertRaises(TypeError, err_dtype, 2, [10], "int64")
self.assertRaises(TypeError, err_dtype, 2, [10], "float64", out)
data = fluid.data(name="data_2d", shape=[2, 2], dtype="float64")
self.assertRaises(ValueError, paddle.norm, data, p="unsupport norm")
self.assertRaises(ValueError, paddle.norm, data, p=[1])
self.assertRaises(ValueError, paddle.norm, data, p=[1], axis=-1)
self.assertRaises(
ValueError, paddle.norm, data, p='unspport', axis=[-2, -1])
data = fluid.data(name="data_3d", shape=[2, 2, 2], dtype="float64")
self.assertRaises(
ValueError, paddle.norm, data, p='unspport', axis=[-2, -1])
self.assertRaises(
ValueError, paddle.norm, data, p='unspport', axis=[-3, -2, -1])
if __name__ == '__main__':
unittest.main()
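The frobenius_norm and p_norm reference helpers used above are defined in a collapsed part of this file; as a rough guide only, NumPy equivalents of what they compute could look like the sketch below (the names and exact reductions here are assumptions, not the file's actual definitions):
import numpy as np

def frobenius_norm_ref(x, axis=None):
    # sqrt of the sum of squared entries over `axis` (all axes when None)
    axis = tuple(range(x.ndim)) if axis is None else tuple(np.atleast_1d(axis))
    return np.sqrt((x ** 2).sum(axis=axis))

def p_norm_ref(x, porder=2, axis=None):
    # (sum over `axis` of |x| ** p) ** (1 / p)
    return (np.abs(x) ** porder).sum(axis=axis) ** (1.0 / porder)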
......@@ -518,149 +518,5 @@ class TestReduceMeanOpError(unittest.TestCase):
self.assertRaises(TypeError, fluid.layers.reduce_mean, x2)
class API_TestSumOpError(unittest.TestCase):
def test_errors(self):
def test_dtype1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.sum(data, dtype="int32")
self.assertRaises(ValueError, test_dtype1)
def test_dtype2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.sum(data, dtype="float32")
self.assertRaises(ValueError, test_dtype2)
def test_dtype3():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="int32")
paddle.sum(data, dtype="bool")
self.assertRaises(ValueError, test_dtype3)
def test_dtype4():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="int32")
paddle.sum(data, dtype="int32")
self.assertRaises(ValueError, test_dtype4)
class API_TestSumOp(unittest.TestCase):
def test_1(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="float32")
result_sum = paddle.sum(input=data, dim=1, dtype="float64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.rand(10, 10).astype(np.float32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum])
self.assertEqual(
(res == np.sum(input_data.astype(np.float64), axis=1)).all(), True)
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int32")
result_sum = paddle.sum(input=data, dim=1, dtype="int64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum])
self.assertEqual(
(res == np.sum(input_data.astype(np.int64), axis=1)).all(), True)
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int32")
result_sum = paddle.sum(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum])
self.assertEqual((res == np.sum(input_data, axis=1)).all(), True)
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int32")
result_sum = paddle.sum(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum])
self.assertEqual((res == np.sum(input_data, axis=1)).all(), True)
with fluid.dygraph.guard():
np_x = np.array([10, 10]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
z = paddle.sum(x, dim=0)
np_z = z.numpy()
z_expected = np.array(np.sum(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
class API_TestMaxOp(unittest.TestCase):
def test_1(self):
# type: float
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="float32")
result_max = paddle.max(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.rand(10, 10).astype(np.float32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
self.assertEqual((res == np.max(input_data, axis=1)).all(), True)
# type: int
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int64")
result_max = paddle.max(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
self.assertEqual((res == np.max(input_data, axis=1)).all(), True)
# dygraph
with fluid.dygraph.guard():
np_x = np.array([10, 10]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
z = paddle.max(x, dim=0)
np_z = z.numpy()
z_expected = np.array(np.max(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
class API_TestMinOp(unittest.TestCase):
def test_1(self):
# type: float
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="float32")
result_min = paddle.min(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.rand(10, 10).astype(np.float32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
self.assertEqual((res == np.min(input_data, axis=1)).all(), True)
# type: int
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int64")
result_min = paddle.min(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
self.assertEqual((res == np.min(input_data, axis=1)).all(), True)
# dygraph
with fluid.dygraph.guard():
np_x = np.array([10, 10]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
z = paddle.min(x, dim=0)
np_z = z.numpy()
z_expected = np.array(np.min(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
if __name__ == '__main__':
unittest.main()
......@@ -59,7 +59,7 @@ class TestRollAPI(unittest.TestCase):
self.data_x = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
def test_index_select_api(self):
def test_roll_api(self):
self.input_data()
# case 1:
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import paddle.nn.functional as F
import unittest
class RowConvTestCase(unittest.TestCase):
def __init__(self,
methodName='runTest',
batch_size=4,
num_channels=8,
time_steps=12,
context_size=3,
act=None,
dtype="float32"):
super(RowConvTestCase, self).__init__(methodName=methodName)
self.batch_size = batch_size
self.num_channels = num_channels
self.time_steps = time_steps
self.context_size = context_size
self.act = act
self.dtype = dtype
def setUp(self):
input_shape = (self.batch_size, self.time_steps, self.num_channels)
self.input = np.random.uniform(size=input_shape).astype(self.dtype)
self.weight_shape = weight_shape = (self.context_size + 1,
self.num_channels)
self.weight = np.random.uniform(size=weight_shape).astype(self.dtype)
def fluid_layer(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data(
"input", [-1, -1, self.num_channels], dtype=self.dtype)
y = fluid.layers.row_conv(
x,
self.context_size,
param_attr=I.NumpyArrayInitializer(self.weight),
act=self.act)
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return y_np
def functional_declarative(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data(
"input", [-1, -1, self.num_channels], dtype=self.dtype)
w = fluid.data("weight", self.weight_shape, dtype=self.dtype)
y = F.row_conv(x, w, act=self.act)
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main,
feed={"input": self.input,
"weight": self.weight},
fetch_list=[y])
return y_np
def functional_imperative(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.input)
w_var = dg.to_variable(self.weight)
y_var = F.row_conv(x_var, w_var, act=self.act)
y_np = y_var.numpy()
return y_np
def nn_layer(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.input)
conv = nn.RowConv(
self.num_channels,
self.context_size,
param_attr=I.NumpyArrayInitializer(self.weight),
act=self.act,
dtype=self.dtype)
y_var = conv(x_var)
y_np = y_var.numpy()
return y_np
def _test_equivalence(self, place):
result1 = self.fluid_layer(place)
result2 = self.functional_declarative(place)
result3 = self.functional_imperative(place)
result4 = self.nn_layer(place)
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
np.testing.assert_array_almost_equal(result3, result4)
def runTest(self):
place = fluid.CPUPlace()
self._test_equivalence(place)
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self._test_equivalence(place)
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
suite.addTest(RowConvTestCase(methodName="runTest"))
suite.addTest(RowConvTestCase(methodName="runTest", act="sigmoid"))
suite.addTest(
RowConvTestCase(
methodName="runTest", context_size=5, act="sigmoid"))
return suite
if __name__ == "__main__":
unittest.main()
......@@ -280,13 +280,13 @@ class TestSplitOpError(unittest.TestCase):
def test_num_or_sections_type_tensor():
x7 = fluid.layers.data(shape=[4], dtype='float16', name='x5')
paddle.split(input=x7, num_or_sections=2.1, dim=3)
fluid.layers.split(input=x7, num_or_sections=2.1, dim=3)
self.assertRaises(TypeError, test_num_or_sections_type_tensor)
def test_axis_type_tensor():
x8 = fluid.layers.data(shape=[4], dtype='float16', name='x6')
paddle.split(input=x8, num_or_sections=2, dim=3.2)
fluid.layers.split(input=x8, num_or_sections=2, dim=3.2)
self.assertRaises(TypeError, test_axis_type_tensor)
......@@ -296,7 +296,7 @@ class API_TestSplit(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64')
data2 = fluid.layers.data('data2', shape=[1], dtype='int32')
x0, x1, x2 = paddle.split(data1, num_or_sections=3, dim=data2)
x0, x1, x2 = fluid.layers.split(data1, num_or_sections=3, dim=data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([4, 6, 6]).astype('float64')
......@@ -314,7 +314,7 @@ class API_TestSplit2(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64')
x0, x1, x2 = paddle.split(data1, num_or_sections=3, dim=2)
x0, x1, x2 = fluid.layers.split(data1, num_or_sections=3, dim=2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([4, 6, 6]).astype('float64')
......@@ -330,7 +330,7 @@ class API_TestSplit3(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.layers.data('data', shape=[-1, 10], dtype='float64')
x0, x1 = paddle.split(data, num_or_sections=(3, 7), dim=1)
x0, x1 = fluid.layers.split(data, num_or_sections=(3, 7), dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([1, 10]).astype('float64')
......@@ -345,7 +345,7 @@ class API_TestSplit4(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.layers.data('data', shape=[-1, 10], dtype='float64')
index = fluid.layers.data('index', shape=[1], dtype='int32')
x0, x1 = paddle.split(data, num_or_sections=(3, index), dim=1)
x0, x1 = fluid.layers.split(data, num_or_sections=(3, index), dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([1, 10]).astype('float64')
......@@ -364,7 +364,7 @@ class API_TestDygraphSplit(unittest.TestCase):
input_1 = np.random.random([4, 6, 6]).astype("int32")
# input is a variable which shape is [4, 6, 6]
input = fluid.dygraph.to_variable(input_1)
x0, x1, x2 = paddle.split(input, num_or_sections=3, dim=1)
x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
x0_out = x0.numpy()
x1_out = x1.numpy()
x2_out = x2.numpy()
......
......@@ -90,7 +90,7 @@ class API_TestSqueeze(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data(
'data1', shape=[-1, 1, 10], dtype='float64')
result_squeeze = paddle.squeeze(data1, axes=[1])
result_squeeze = fluid.layers.squeeze(data1, axes=[1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([5, 1, 10]).astype('float64')
......@@ -105,7 +105,7 @@ class API_TestDygraphSqueeze(unittest.TestCase):
with fluid.dygraph.guard():
input_1 = np.random.random([5, 1, 10]).astype("int32")
input = fluid.dygraph.to_variable(input_1)
output = paddle.squeeze(input, axes=[1])
output = fluid.layers.squeeze(input, axes=[1])
out_np = output.numpy()
expected_out = np.squeeze(input_1, axis=1)
self.assertTrue(np.allclose(expected_out, out_np))
......
......@@ -150,7 +150,7 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
for i in range(self.iter_num):
fluid.layers.array_write(input, zero + i, tensor_array)
self.out_var = paddle.stack(tensor_array, axis=self.axis)
self.out_var = fluid.layers.stack(tensor_array, axis=self.axis)
def test_case(self):
self.assertTrue(self.out_var.shape[self.axis] == -1)
......@@ -168,7 +168,7 @@ class API_test(unittest.TestCase):
data1 = fluid.layers.data('data1', shape=[1, 2], dtype='float64')
data2 = fluid.layers.data('data2', shape=[1, 2], dtype='float64')
data3 = fluid.layers.data('data3', shape=[1, 2], dtype='float64')
result_stack = paddle.stack([data1, data2, data3], axis=0)
result_stack = fluid.layers.stack([data1, data2, data3], axis=0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([1, 2]).astype('float64')
......@@ -192,14 +192,14 @@ class API_DygraphTest(unittest.TestCase):
x1 = fluid.dygraph.to_variable(data1)
x2 = fluid.dygraph.to_variable(data2)
x3 = fluid.dygraph.to_variable(data3)
result = paddle.stack([x1, x2, x3], axis=0)
result = fluid.layers.stack([x1, x2, x3], axis=0)
result_np = result.numpy()
expected_result = np.stack([data1, data2, data3], axis=0)
self.assertTrue(np.allclose(expected_result, result_np))
with fluid.dygraph.guard():
y1 = fluid.dygraph.to_variable(data1)
result = paddle.stack(y1, axis=0)
result = fluid.layers.stack(y1, axis=0)
result_np_2 = result.numpy()
expected_result_2 = np.stack(data1, axis=0)
self.assertTrue(np.allclose(expected_result_2, result_np_2))
......
......@@ -225,22 +225,6 @@ def create_test_sum_fp16_class(parent):
globals()[cls_name] = TestSumFp16Case
class API_Test_Elementwise_Sum(unittest.TestCase):
def test_api(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input0 = fluid.layers.fill_constant(
shape=[2, 3], dtype='int64', value=5)
input1 = fluid.layers.fill_constant(
shape=[2, 3], dtype='int64', value=3)
expected_result = np.empty((2, 3))
expected_result.fill(8)
sum_value = paddle.elementwise_sum([input0, input1])
exe = fluid.Executor(fluid.CPUPlace())
result = exe.run(fetch_list=[sum_value])
self.assertEqual((result == expected_result).all(), True)
class TestRaiseSumError(unittest.TestCase):
def test_errors(self):
def test_type():
......
......@@ -142,7 +142,7 @@ class TestTAPI(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
data_t = paddle.t(data)
data_t = fluid.layers.t(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data_np = np.random.random([10]).astype("float64")
......@@ -152,7 +152,7 @@ class TestTAPI(unittest.TestCase):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10, 5], dtype="float64", name="data")
data_t = paddle.t(data)
data_t = fluid.layers.t(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data_np = np.random.random([10, 5]).astype("float64")
......@@ -162,7 +162,7 @@ class TestTAPI(unittest.TestCase):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[1, 5], dtype="float64", name="data")
data_t = paddle.t(data)
data_t = fluid.layers.t(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data_np = np.random.random([1, 5]).astype("float64")
......@@ -173,7 +173,7 @@ class TestTAPI(unittest.TestCase):
with fluid.dygraph.guard():
np_x = np.random.random([10]).astype("float64")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
z = fluid.layers.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
......@@ -181,7 +181,7 @@ class TestTAPI(unittest.TestCase):
with fluid.dygraph.guard():
np_x = np.random.random([10, 5]).astype("float64")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
z = fluid.layers.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
......@@ -189,7 +189,7 @@ class TestTAPI(unittest.TestCase):
with fluid.dygraph.guard():
np_x = np.random.random([1, 5]).astype("float64")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
z = fluid.layers.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
......@@ -199,7 +199,7 @@ class TestTAPI(unittest.TestCase):
x = fluid.data(name='x', shape=[10, 5, 3], dtype='float64')
def test_x_dimension_check():
paddle.t(x)
fluid.layers.t(x)
self.assertRaises(ValueError, test_x_dimension_check)
......
......@@ -19,7 +19,7 @@ import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.nn.functional import interpolate
from paddle.fluid.layers import interpolate
def trilinear_interp_np(input,
......
......@@ -81,7 +81,7 @@ class API_TestUnsqueeze(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data('data1', shape=[-1, 10], dtype='float64')
result_squeeze = paddle.unsqueeze(data1, axes=[1])
result_squeeze = fluid.layers.unsqueeze(data1, axes=[1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([5, 1, 10]).astype('float64')
......@@ -98,7 +98,7 @@ class TestUnsqueezeOpError(unittest.TestCase):
def test_axes_type():
x6 = fluid.layers.data(
shape=[-1, 10], dtype='float16', name='x3')
paddle.unsqueeze(x6, axes=3.2)
fluid.layers.unsqueeze(x6, axes=3.2)
self.assertRaises(TypeError, test_axes_type)
......@@ -108,7 +108,7 @@ class API_TestUnsqueeze2(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data('data1', shape=[-1, 10], dtype='float64')
data2 = fluid.data('data2', shape=[1], dtype='int32')
result_squeeze = paddle.unsqueeze(data1, axes=data2)
result_squeeze = fluid.layers.unsqueeze(data1, axes=data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([5, 1, 10]).astype('float64')
......@@ -125,7 +125,7 @@ class API_TestUnsqueeze3(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data('data1', shape=[-1, 10], dtype='float64')
data2 = fluid.data('data2', shape=[1], dtype='int32')
result_squeeze = paddle.unsqueeze(data1, axes=[data2, 3])
result_squeeze = fluid.layers.unsqueeze(data1, axes=[data2, 3])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input1 = np.random.random([5, 1, 10, 1]).astype('float64')
......@@ -143,7 +143,7 @@ class API_TestDyUnsqueeze(unittest.TestCase):
input_1 = np.random.random([5, 1, 10]).astype("int32")
input1 = np.squeeze(input_1, axis=1)
input = fluid.dygraph.to_variable(input_1)
output = paddle.unsqueeze(input, axes=[1])
output = fluid.layers.unsqueeze(input, axes=[1])
out_np = output.numpy()
self.assertTrue(np.allclose(input1, out_np))
......@@ -154,7 +154,7 @@ class API_TestDyUnsqueeze2(unittest.TestCase):
input_1 = np.random.random([5, 1, 10]).astype("int32")
input1 = np.squeeze(input_1, axis=1)
input = fluid.dygraph.to_variable(input_1)
output = paddle.unsqueeze(input, axes=1)
output = fluid.layers.unsqueeze(input, axes=1)
out_np = output.numpy()
self.assertTrue(np.allclose(input1, out_np))
......
......@@ -23,58 +23,6 @@ class TestVarianceLayer(unittest.TestCase):
self._dtype = "float64"
self._input = np.random.random([2, 3, 4, 5]).astype(self._dtype)
def static(self, axis=None, keepdim=False, unbiased=True):
prog = fluid.Program()
with fluid.program_guard(prog):
data = fluid.data(
name="data", dtype=self._dtype, shape=[None, 3, 4, 5])
out = prog.current_block().create_var(
dtype=self._dtype, shape=[2, 3, 4, 5])
paddle.var(input=data,
axis=axis,
keepdim=keepdim,
unbiased=unbiased,
out=out)
exe = fluid.Executor(self._place)
return exe.run(feed={"data": self._input},
program=prog,
fetch_list=[out])[0]
def dynamic(self, axis=None, keepdim=False, unbiased=True):
with fluid.dygraph.guard(self._place):
data = fluid.dygraph.to_variable(self._input)
out = paddle.var(input=data,
axis=axis,
keepdim=keepdim,
unbiased=unbiased)
return out.numpy()
def numpy(self, axis=None, keepdim=False, unbiased=True):
ddof = 1 if unbiased else 0
axis = tuple(axis) if isinstance(axis, list) else axis
return np.var(self._input, axis=axis, keepdims=keepdim, ddof=ddof)
def test_equal(self):
places = []
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
self._place = place
self.assertTrue(np.allclose(self.numpy(), self.static()))
self.assertTrue(
np.allclose(
self.numpy(axis=[0, 2]), self.dynamic(axis=[0, 2])))
self.assertTrue(
np.allclose(
self.numpy(
axis=[1, 3], keepdim=True),
self.dynamic(
axis=[1, 3], keepdim=True)))
self.assertTrue(
np.allclose(
self.numpy(unbiased=False), self.dynamic(unbiased=False)))
if __name__ == '__main__':
unittest.main()
......@@ -59,121 +59,5 @@ class TestWhereOp3(TestWhereOp):
self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool)
class TestWhereAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.shape = [10, 15]
self.cond = np.array(np.random.randint(2, size=self.shape), dtype=bool)
self.x = np.random.uniform(-2, 3, self.shape).astype(np.float32)
self.y = np.random.uniform(-2, 3, self.shape).astype(np.float32)
self.out = np.where(self.cond, self.x, self.y)
def ref_x_backward(self, dout):
return np.where(self.cond, dout, 0)
def ref_y_backward(self, dout):
return np.where(~self.cond, dout, 0)
def test_api(self, use_cuda=False):
for x_stop_gradient in [False, True]:
for y_stop_gradient in [False, True]:
with fluid.program_guard(Program(), Program()):
cond = fluid.layers.data(
name='cond', shape=self.shape, dtype='bool')
x = fluid.layers.data(
name='x', shape=self.shape, dtype='float32')
y = fluid.layers.data(
name='y', shape=self.shape, dtype='float32')
x.stop_gradient = x_stop_gradient
y.stop_gradient = y_stop_gradient
result = paddle.where(cond, x, y)
append_backward(layers.mean(result))
for use_cuda in [False, True]:
if use_cuda and not fluid.core.is_compiled_with_cuda():
break
place = fluid.CUDAPlace(
0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
fetch_list = [result, result.grad_name]
if x_stop_gradient is False:
fetch_list.append(x.grad_name)
if y_stop_gradient is False:
fetch_list.append(y.grad_name)
out = exe.run(
fluid.default_main_program(),
feed={'cond': self.cond,
'x': self.x,
'y': self.y},
fetch_list=fetch_list)
assert np.array_equal(out[0], self.out)
if x_stop_gradient is False:
assert np.array_equal(out[2],
self.ref_x_backward(out[1]))
if y.stop_gradient is False:
assert np.array_equal(
out[3], self.ref_y_backward(out[1]))
elif y.stop_gradient is False:
assert np.array_equal(out[2],
self.ref_y_backward(out[1]))
def test_api_broadcast(self, use_cuda=False):
main_program = Program()
with fluid.program_guard(main_program):
x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32')
y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32')
x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32")
y_i = np.array([[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0]]).astype("float32")
result = paddle.where(x > 1, x=x, y=y)
for use_cuda in [False, True]:
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
out = exe.run(fluid.default_main_program(),
feed={'x': x_i,
'y': y_i},
fetch_list=[result])
assert np.array_equal(out[0], np.where(x_i > 1, x_i, y_i))
class TestWhereDygraphAPI(unittest.TestCase):
def test_api(self):
with fluid.dygraph.guard():
x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float64")
cond_i = np.array([False, False, True, True]).astype("bool")
x = fluid.dygraph.to_variable(x_i)
y = fluid.dygraph.to_variable(y_i)
cond = fluid.dygraph.to_variable(cond_i)
out = paddle.where(cond, x, y)
assert np.array_equal(out.numpy(), np.where(cond_i, x_i, y_i))
class TestWhereOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float64")
cond_i = np.array([False, False, True, True]).astype("bool")
def test_Variable():
paddle.where(cond_i, x_i, y_i)
self.assertRaises(TypeError, test_Variable)
def test_type():
x = fluid.layers.data(name='x', shape=[4], dtype='bool')
y = fluid.layers.data(name='y', shape=[4], dtype='float16')
cond = fluid.layers.data(name='cond', shape=[4], dtype='int32')
paddle.where(cond, x, y)
self.assertRaises(TypeError, test_type)
if __name__ == '__main__':
unittest.main()
......@@ -16,11 +16,9 @@
# including layers, linear, conv, rnn etc.
from .layer import norm
from .functional import extension
__all__ = []
__all__ += norm.__all__
__all__ += extension.__all__
# TODO: define alias in nn directory
# from .clip import ErrorClipByValue #DEFINE_ALIAS
......@@ -206,22 +204,6 @@ from .functional.activation import sigmoid #DEFINE_ALIAS
# from .functional.activation import tanh_shrink #DEFINE_ALIAS
# from .functional.activation import thresholded_relu #DEFINE_ALIAS
from .functional.activation import log_softmax #DEFINE_ALIAS
# from .functional.extension import add_position_encoding #DEFINE_ALIAS
# from .functional.extension import autoincreased_step_counter #DEFINE_ALIAS
# from .functional.extension import continuous_value_model #DEFINE_ALIAS
# from .functional.extension import filter_by_instag #DEFINE_ALIAS
# from .functional.extension import linear_chain_crf #DEFINE_ALIAS
# from .functional.extension import merge_selected_rows #DEFINE_ALIAS
# from .functional.extension import multiclass_nms #DEFINE_ALIAS
# from .functional.extension import polygon_box_transform #DEFINE_ALIAS
# from .functional.extension import random_crop #DEFINE_ALIAS
from .functional.extension import row_conv #DEFINE_ALIAS
# from .functional.extension import rpn_target_assign #DEFINE_ALIAS
# from .functional.extension import similarity_focus #DEFINE_ALIAS
# from .functional.extension import target_assign #DEFINE_ALIAS
# from .functional.extension import temporal_shift #DEFINE_ALIAS
# from .functional.extension import warpctc #DEFINE_ALIAS
from .functional.extension import diag_embed #DEFINE_ALIAS
# from .functional.rnn import gru_unit #DEFINE_ALIAS
# from .functional.rnn import lstm #DEFINE_ALIAS
# from .functional.rnn import lstm_unit #DEFINE_ALIAS
......
......@@ -130,24 +130,6 @@ from .activation import sigmoid #DEFINE_ALIAS
# from .activation import tanh_shrink #DEFINE_ALIAS
# from .activation import thresholded_relu #DEFINE_ALIAS
from .activation import log_softmax #DEFINE_ALIAS
from . import extension
__all__ += extension.__all__
# from .extension import add_position_encoding #DEFINE_ALIAS
# from .extension import autoincreased_step_counter #DEFINE_ALIAS
# from .extension import continuous_value_model #DEFINE_ALIAS
# from .extension import filter_by_instag #DEFINE_ALIAS
# from .extension import linear_chain_crf #DEFINE_ALIAS
# from .extension import merge_selected_rows #DEFINE_ALIAS
# from .extension import multiclass_nms #DEFINE_ALIAS
# from .extension import polygon_box_transform #DEFINE_ALIAS
# from .extension import random_crop #DEFINE_ALIAS
from .extension import row_conv #DEFINE_ALIAS
# from .extension import rpn_target_assign #DEFINE_ALIAS
# from .extension import similarity_focus #DEFINE_ALIAS
# from .extension import target_assign #DEFINE_ALIAS
# from .extension import temporal_shift #DEFINE_ALIAS
# from .extension import warpctc #DEFINE_ALIAS
from .extension import diag_embed #DEFINE_ALIAS
# from .rnn import gru_unit #DEFINE_ALIAS
# from .rnn import lstm #DEFINE_ALIAS
# from .rnn import lstm_unit #DEFINE_ALIAS
......@@ -180,17 +162,3 @@ from .extension import diag_embed #DEFINE_ALIAS
# from .lod import dynamic_gru #DEFINE_ALIAS
# from .lod import dynamic_lstm #DEFINE_ALIAS
# from .lod import dynamic_lstmp #DEFINE_ALIAS
from . import common
#__all__ += common.__all__
# from .common import dropout #DEFINE_ALIAS
# from .common import embedding #DEFINE_ALIAS
# from .common import fc #DEFINE_ALIAS
# from .common import label_smooth #DEFINE_ALIAS
# from .common import one_hot #DEFINE_ALIAS
# from .common import pad #DEFINE_ALIAS
# from .common import pad_constant_like #DEFINE_ALIAS
# from .common import pad2d #DEFINE_ALIAS
# from .common import unfold #DEFINE_ALIAS
# from .common import bilinear_tensor_product #DEFINE_ALIAS
# from .common import assign #DEFINE_ALIAS
from .common import interpolate #DEFINE_ALIAS
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import Variable, fill_constant
# TODO: define the common functions to build a neural network
# __all__ = ['dropout',
# 'embedding',
# 'fc',
# 'label_smooth',
# 'one_hot',
# 'pad',
# 'pad_constant_like',
# 'pad2d',
# 'unfold',
# 'bilinear_tensor_product',
# 'assign',
# 'interpolate']
__all__ = ['interpolate']
def interpolate(input,
out_shape=None,
scale=None,
name=None,
resample='BILINEAR',
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes a batch of images.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
and the resizing only applies on the three dimensions (depth, height and width).
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future; use :attr:`out_shape` instead.
Supporting resample methods:
'BILINEAR' : Bilinear interpolation
'TRILINEAR' : Trilinear interpolation
'NEAREST' : Nearest neighbor interpolation
'BICUBIC' : Bicubic interpolation
Nearest neighbor interpolation samples the nearest input pixel along both
the 3rd dimension (height direction) and the 4th dimension (width
direction) of the input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
align_corners and align_mode are optional parameters; they select the
calculation method of the interpolation.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Bicubic interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
Parameters:
input (Variable): 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of image resize
layer, the shape is (out_h, out_w) when input is a 4-D Tensor and is
(out_d, out_h, out_w) when input is a 5-D Tensor. Default: None. If
a list, each element can be an integer or a Tensor Variable of shape: [1].
If a Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): A name for this layer (optional). If set to None, the layer
will be named automatically.
resample(str): The resample method. It supports 'BILINEAR', 'TRILINEAR',
'BICUBIC' and 'NEAREST' currently. Default: 'BILINEAR'
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, the image is resized
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors would occur at graph construction stage.
Default: None
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.
Default: True
align_mode(int) : An optional flag for bilinear interpolation: \'0\' for
src_idx = scale*(dst_index+0.5)-0.5, \'1\' for
src_idx = scale*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: out_shape should be a list or tuple or Variable.
TypeError: actual_shape should either be Variable or None.
ValueError: The 'resample' of image_resize can only be 'BILINEAR',
'TRILINEAR', 'BICUBIC', or 'NEAREST' currently.
ValueError: 'BILINEAR', 'BICUBIC' and 'NEAREST' only support 4-D tensor.
ValueError: 'TRILINEAR' only supports 5-D tensor.
ValueError: One of out_shape and scale must not be None.
ValueError: out_shape length should be 2 for input 4-D tensor.
ValueError: out_shape length should be 3 for input 5-D tensor.
ValueError: scale should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
#declarative mode
import paddle
import numpy as np
import paddle.fluid as fluid
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = paddle.nn.functional.interpolate(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = paddle.nn.functional.interpolate(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = paddle.nn.functional.interpolate(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = paddle.nn.functional.interpolate(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = paddle.nn.functional.interpolate(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
resample_methods = {
'BILINEAR': 'bilinear',
'TRILINEAR': 'trilinear',
'NEAREST': 'nearest',
'BICUBIC': 'bicubic',
}
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'BILINEAR', 'TRILINEAR', "
" 'BICUBIC' or 'NEAREST' currently.")
resample_type = resample_methods[resample]
if resample in ['BILINEAR', 'NEAREST', 'BICUBIC'] and len(input.shape) != 4:
raise ValueError(
"'BILINEAR', 'BICUBIC' and 'NEAREST' only support 4-D tensor.")
if resample == 'TRILINEAR' and len(input.shape) != 5:
raise ValueError("'TRILINEAR'only support 5-D tensor.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if out_shape is None and scale is None:
raise ValueError("One of out_shape and scale must not be None.")
helper = LayerHelper('{}_interp'.format(resample_type), **locals())
dtype = helper.input_dtype()
if len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC':
data_layout = 'NHWC'
inputs = {"X": input}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
if out_shape is not None:
if isinstance(out_shape, Variable):
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if not _is_list_or_tuple_(out_shape):
raise TypeError(
"out_shape should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(input.shape) == 4:
if len(out_shape) != 2:
raise ValueError("out_shape length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(input.shape) == 5:
if len(out_shape) != 3:
raise ValueError("out_shape length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = float(scale)
else:
raise TypeError(
"Attr(scale)'s type should be float, int or Variable.")
if isinstance(actual_shape, Variable):
warnings.warn(
"actual_shape will be deprecated, it is recommended to use "
"out_shape instead of actual_shape to specify output shape dynamically."
)
actual_shape.stop_gradient = True
inputs["OutSize"] = actual_shape
elif actual_shape is not None:
raise TypeError("actual_shape should either be Variable or None.")
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
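A quick numeric check of the nearest-neighbor size rules quoted in the docstring above; the helper below is illustrative only (its name is not part of any API) and simply restates the floor/round rule:
import math

def nearest_output_size(in_size, scale_factor, align_corners):
    # Per the docstring: round() when align_corners=True, floor() otherwise.
    if align_corners:
        return int(round(in_size * scale_factor))
    return int(math.floor(in_size * scale_factor))

assert nearest_output_size(6, 2.0, align_corners=False) == 12
assert nearest_output_size(10, 0.5, align_corners=False) == 5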
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the extension functions
__all__ = [
# 'add_position_encoding',
# 'autoincreased_step_counter',
# 'continuous_value_model',
# 'filter_by_instag',
# 'linear_chain_crf',
# 'merge_selected_rows',
# 'multiclass_nms',
# 'polygon_box_transform',
# 'random_crop',
'row_conv',
# 'rpn_target_assign',
# 'similarity_focus',
# 'target_assign',
# 'temporal_shift',
# 'warpctc',
'diag_embed'
]
import numpy as np
from ...fluid.data_feeder import check_dtype
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.layers.tensor import assign
from ...fluid import core, dygraph_utils
from ...fluid.layers.layer_function_generator import templatedoc
def diag_embed(input, offset=0, dim1=-2, dim2=-1):
"""
This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
are filled by ``input``. By default, a 2D plane formed by the last two dimensions
of the returned tensor will be selected.
The argument ``offset`` determines which diagonal is generated:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
Args:
input(Variable|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.
Returns:
Variable, the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle.nn.functional as F
import paddle.fluid.dygraph as dg
import numpy as np
diag_embed = np.random.randn(2, 3).astype('float32')
# [[ 0.7545889 , -0.25074545, 0.5929117 ],
# [-0.6097662 , -0.01753256, 0.619769 ]]
with dg.guard():
data1 = F.diag_embed(diag_embed)
data1.numpy()
# [[[ 0.7545889 , 0. , 0. ],
# [ 0. , -0.25074545, 0. ],
# [ 0. , 0. , 0.5929117 ]],
# [[-0.6097662 , 0. , 0. ],
# [ 0. , -0.01753256, 0. ],
# [ 0. , 0. , 0.619769 ]]]
data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
data2.numpy()
# [[[ 0. , 0. , 0. , 0. ],
# [ 0.7545889 , 0. , 0. , 0. ],
# [ 0. , -0.25074545, 0. , 0. ],
# [ 0. , 0. , 0.5929117 , 0. ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [-0.6097662 , 0. , 0. , 0. ],
# [ 0. , -0.01753256, 0. , 0. ],
# [ 0. , 0. , 0.619769 , 0. ]]]
data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
data3.numpy()
# [[[ 0. , 0.7545889 , 0. , 0. ],
# [ 0. , -0.6097662 , 0. , 0. ]],
#
# [[ 0. , 0. , -0.25074545, 0. ],
# [ 0. , 0. , -0.01753256, 0. ]],
#
# [[ 0. , 0. , 0. , 0.5929117 ],
# [ 0. , 0. , 0. , 0.619769 ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [ 0. , 0. , 0. , 0. ]]]
"""
inputs = {'Input': [input]}
attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2}
if not isinstance(input, Variable):
input = assign(input)
def __check_input(input, offset, dim1, dim2):
check_dtype(input.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'diag_embed')
input_shape = list(input.shape)
assert len(input_shape) >= 1, \
"Input must be at least 1-dimensional, " \
"But received Input's dimensional: %s.\n" % \
len(input_shape)
assert np.abs(dim1) <= len(input_shape), \
"Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim1)
assert np.abs(dim2) <= len(input_shape), \
"Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim2)
dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1
dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1
assert dim1_ != dim2_, \
"dim1 and dim2 cannot be the same dimension." \
"But received dim1 = %d, dim2 = %d\n"%(dim1, dim2)
if not in_dygraph_mode():
__check_input(input, offset, dim1, dim2)
helper = LayerHelper("diag_embed", **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='diag_embed',
inputs={'Input': [input]},
attrs={'offset': offset,
'dim1': dim1,
'dim2': dim2},
outputs={'Out': [out]})
out.stop_gradient = True
return out
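A minimal NumPy sketch of the default behavior documented above (dim1=-2, dim2=-1): one square matrix per leading index, with the input laid along the diagonal chosen by offset. This is an illustrative reference, not the operator's implementation:
import numpy as np

def diag_embed_ref(x, offset=0):
    # Output matrices are (m + |offset|)-square, where m is the last dim of x.
    x = np.asarray(x)
    m = x.shape[-1]
    n = m + abs(offset)
    out = np.zeros(x.shape[:-1] + (n, n), dtype=x.dtype)
    idx = np.arange(m)
    rows = idx - min(offset, 0)   # shifted down when offset < 0
    cols = idx + max(offset, 0)   # shifted right when offset > 0
    out[..., rows, cols] = x
    return out

assert np.array_equal(diag_embed_ref(np.array([1.0, 2.0, 3.0])),
                      np.diag([1.0, 2.0, 3.0]))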
@templatedoc()
def row_conv(input, weight, act=None):
"""
${comment}
Args:
input (Variable): the input (X) is a LoDTensor or tensor. A LoDTensor (X)
supports variable time-length input sequences. The underlying
tensor in this LoDTensor is a matrix with shape (T, D), where
T is the total time steps in this mini-batch and D is the input
data dimension.
If the input is a padded minibatch, the shape of the input is
(N, T, D), N is batch size, T is the max time steps in the batch,
D is the input data dimension.
weight (Variable): The weight. A Tensor with shape
(future_context_size + 1, D), where future_context_size is the
context size of the RowConv operator.
        act (str, optional): Non-linear activation to be applied to the
            output variable. Default: None.
Returns:
${out_comment}.
Examples:
.. code-block:: python
            from paddle import fluid
            import paddle.fluid.dygraph as dg
            import paddle.nn.functional as F
            import numpy as np

            batch_size = 4
            time_steps = 8
            feature_size = 6
            context_size = 4

            x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)
            weight = np.random.randn(context_size + 1, feature_size).astype(np.float32)

            place = fluid.CPUPlace()
            with dg.guard(place):
                x_var = dg.to_variable(x)
                w_var = dg.to_variable(weight)
                y_var = F.row_conv(x_var, w_var)
                y_np = y_var.numpy()

            print(y_np.shape)
            # (4, 8, 6)
"""
if in_dygraph_mode():
pre_act = core.ops.row_conv(input, weight)
out = dygraph_utils._append_activation_in_dygraph(pre_act, act)
return out
else:
helper = LayerHelper('row_conv', **locals())
dtype = helper.input_dtype()
inputs = {'X': [input], 'Filter': [weight]}
pre_act = helper.create_variable_for_type_inference(dtype)
outputs = {'Out': [pre_act]}
helper.append_op(type='row_conv', inputs=inputs, outputs=outputs)
out = helper.append_activation(pre_act)
return out
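
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the public API: a NumPy reference for the
# padded-minibatch case of row_conv. Each output step is a weighted sum of
# the current step and the next `future_context_size` steps, feature-wise.
# The helper name `_np_row_conv_ref` is hypothetical and intended only for
# sanity checks against the layer above.
def _np_row_conv_ref(x, weight):
    # x: (N, T, D); weight: (future_context_size + 1, D).
    n, t, d = x.shape
    out = np.zeros_like(x)
    for i in range(min(weight.shape[0], t)):
        # Steps beyond the end of the sequence contribute zero (implicit
        # zero padding), so each shifted slice simply gets shorter.
        out[:, :t - i, :] += x[:, i:, :] * weight[i]
    return out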
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define functions to get tensor attributes
# __all__ = ['rank', 'shape']
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define functions to save & load a tensor
# __all__ = ['save', 'load']
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define statistical functions of a tensor
__all__ = [
    # 'mean',
    # 'reduce_mean',
    # 'std',
    'var',
]
import numpy as np

from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import in_dygraph_mode
from ..fluid import layers
from ..fluid.data_feeder import convert_dtype
from .search import where

def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
"""
Computes the variance of the input Variable's elements along the specified
axis.
Args:
input (Variable): The input Variable to be computed variance, with data
type float32 and float64 supported.
axis (list|int, optional): The axis along which the variance is computed.
If `None`, compute the variance over all elements of :attr:`input`
and return a Variable with a single element, otherwise it must be in
the range :math:`[-rank(input), rank(input))`. If :math:`axis[i] < 0`,
the axis to compute is :math:`rank(input) + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimensions in
the output Variable. The dimensions in :attr:`axis` will be squeezed
and the result Variable will have :attr:`len(axis)` fewer dimensions
than the :attr:`input` unless :attr:`keepdim` is true, default False.
unbiased (bool, optional): Whether to compute variance via the unbiased
estimator, in which the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along :attr:`axis`, otherwise the divisor is :math:`N`. Default True.
out (Variable, optional): Alternate output Variable to store the result
variance. Default None.
name (str, optional): The name for this layer. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`. Default None.
Returns:
Variable: The result variance with the same dtype as :attr:`input`.
If :attr:`out = None`, returns a new Variable containing the
variance, otherwise returns a reference to the output Variable.
Examples:
.. code-block:: python
            import numpy as np
            import paddle
            import paddle.fluid.dygraph as dg

            a = np.array([[1.0, 2.0], [3.0, 4.0]]).astype("float32")
            with dg.guard():
                data = dg.to_variable(a)
                variance = paddle.var(data, axis=[1])
                print(variance.numpy())
                # [0.5 0.5]
"""
dtype = convert_dtype(input.dtype)
if dtype not in ["float32", "float64"]:
raise ValueError("Layer tensor.var() only supports floating-point "
"dtypes, but received {}.".format(dtype))
rank = len(input.shape)
    if axis is None or axis == []:
        axes = list(range(rank))
    else:
        # `axis` may be a single int or a list of ints, per the docstring.
        axes = [axis] if isinstance(axis, int) else list(axis)
    axes = [e if e >= 0 else e + rank for e in axes]
inp_shape = input.shape if in_dygraph_mode() else layers.shape(input)
mean = layers.reduce_mean(input, dim=axis, keep_dim=True, name=name)
tmp = layers.reduce_mean(
(input - mean)**2, dim=axis, keep_dim=keepdim, name=name)
if unbiased:
n = 1
for i in axes:
n *= inp_shape[i]
if not in_dygraph_mode():
n = layers.cast(n, dtype)
zero_const = layers.fill_constant(shape=[1], dtype=dtype, value=0.0)
factor = where(n > 1.0, n / (n - 1.0), zero_const)
else:
factor = n / (n - 1.0) if n > 1.0 else 0.0
tmp *= factor
if out:
layers.assign(input=tmp, output=out)
return out
else:
return tmp
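
# ---------------------------------------------------------------------------
# A quick eager-mode cross-check of `var` against NumPy, kept as a comment so
# that importing this module stays side-effect free. The `unbiased` flag
# mirrors NumPy's `ddof` argument: unbiased=True corresponds to ddof=1
# (divisor N - 1), unbiased=False to ddof=0 (divisor N).
#
#     import numpy as np
#     import paddle
#     import paddle.fluid.dygraph as dg
#
#     a = np.random.randn(3, 4).astype("float32")
#     with dg.guard():
#         v = paddle.var(dg.to_variable(a), axis=[1], unbiased=True)
#     np.testing.assert_allclose(v.numpy(), a.var(axis=1, ddof=1), rtol=1e-5)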
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the basic tensor classes
# __all__ = ['Tensor', 'LoDTensor', 'LoDTensorArray']
......@@ -182,7 +182,6 @@ packages=['paddle',
'paddle.nn.functional',
'paddle.nn.layer',
'paddle.imperative',
'paddle.tensor',
]
with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
......