未验证 提交 97229944 编写于 作者: Z Zhanlue Yang 提交者: GitHub

Fixed get_tensor method for EagerTensor (#39414)

* Enabled Eager OpTest #1

* Enabled Eager OpTest #1

* Fixed get_tensor method for EagerTensor
上级 ec8a0c1d
...@@ -506,7 +506,7 @@ PyObject* ToPyObject(const paddle::framework::proto::VarType& type) { ...@@ -506,7 +506,7 @@ PyObject* ToPyObject(const paddle::framework::proto::VarType& type) {
} }
PyObject* ToPyObject(const paddle::framework::LoDTensor* value) { PyObject* ToPyObject(const paddle::framework::LoDTensor* value) {
auto obj = ::pybind11::cast(value, py::return_value_policy::copy); auto obj = ::pybind11::cast(value, py::return_value_policy::reference);
obj.inc_ref(); obj.inc_ref();
return obj.ptr(); return obj.ptr();
} }
......
#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
# limitations under the License. # limitations under the License.
from __future__ import print_function from __future__ import print_function
import unittest import unittest
import numpy as np import numpy as np
import paddle import paddle
...@@ -24,38 +23,39 @@ from op_test import OpTest ...@@ -24,38 +23,39 @@ from op_test import OpTest
from paddle.fluid import compiler, Program, program_guard from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.op import Operator from paddle.fluid.op import Operator
from paddle.fluid.backward import append_backward from paddle.fluid.backward import append_backward
from paddle.fluid.framework import _test_eager_guard
class TestWhereOp(OpTest): class TestWhereOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "where" self.op_type = 'where'
self.init_config() self.init_config()
self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y} self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
self.outputs = {'Out': np.where(self.cond, self.x, self.y)} self.outputs = {'Out': np.where(self.cond, self.x, self.y)}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out') self.check_grad(['X', 'Y'], 'Out', check_eager=True)
def init_config(self): def init_config(self):
self.x = np.random.uniform(-3, 5, (100)).astype("float64") self.x = np.random.uniform((-3), 5, 100).astype('float64')
self.y = np.random.uniform(-3, 5, (100)).astype("float64") self.y = np.random.uniform((-3), 5, 100).astype('float64')
self.cond = np.zeros((100)).astype("bool") self.cond = np.zeros(100).astype('bool')
class TestWhereOp2(TestWhereOp): class TestWhereOp2(TestWhereOp):
def init_config(self): def init_config(self):
self.x = np.random.uniform(-5, 5, (60, 2)).astype("float64") self.x = np.random.uniform((-5), 5, (60, 2)).astype('float64')
self.y = np.random.uniform(-5, 5, (60, 2)).astype("float64") self.y = np.random.uniform((-5), 5, (60, 2)).astype('float64')
self.cond = np.ones((60, 2)).astype("bool") self.cond = np.ones((60, 2)).astype('bool')
class TestWhereOp3(TestWhereOp): class TestWhereOp3(TestWhereOp):
def init_config(self): def init_config(self):
self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype("float64") self.x = np.random.uniform((-3), 5, (20, 2, 4)).astype('float64')
self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype("float64") self.y = np.random.uniform((-3), 5, (20, 2, 4)).astype('float64')
self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool) self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool)
...@@ -66,15 +66,15 @@ class TestWhereAPI(unittest.TestCase): ...@@ -66,15 +66,15 @@ class TestWhereAPI(unittest.TestCase):
def init_data(self): def init_data(self):
self.shape = [10, 15] self.shape = [10, 15]
self.cond = np.array(np.random.randint(2, size=self.shape), dtype=bool) self.cond = np.array(np.random.randint(2, size=self.shape), dtype=bool)
self.x = np.random.uniform(-2, 3, self.shape).astype(np.float32) self.x = np.random.uniform((-2), 3, self.shape).astype(np.float32)
self.y = np.random.uniform(-2, 3, self.shape).astype(np.float32) self.y = np.random.uniform((-2), 3, self.shape).astype(np.float32)
self.out = np.where(self.cond, self.x, self.y) self.out = np.where(self.cond, self.x, self.y)
def ref_x_backward(self, dout): def ref_x_backward(self, dout):
return np.where(self.cond == True, dout, 0) return np.where((self.cond == True), dout, 0)
def ref_y_backward(self, dout): def ref_y_backward(self, dout):
return np.where(self.cond == False, dout, 0) return np.where((self.cond == False), dout, 0)
def test_api(self, use_cuda=False): def test_api(self, use_cuda=False):
for x_stop_gradient in [False, True]: for x_stop_gradient in [False, True]:
...@@ -90,17 +90,17 @@ class TestWhereAPI(unittest.TestCase): ...@@ -90,17 +90,17 @@ class TestWhereAPI(unittest.TestCase):
y.stop_gradient = y_stop_gradient y.stop_gradient = y_stop_gradient
result = paddle.where(cond, x, y) result = paddle.where(cond, x, y)
append_backward(layers.mean(result)) append_backward(layers.mean(result))
for use_cuda in [False, True]: for use_cuda in [False, True]:
if use_cuda and not fluid.core.is_compiled_with_cuda(): if (use_cuda and
(not fluid.core.is_compiled_with_cuda())):
break break
place = fluid.CUDAPlace( place = (fluid.CUDAPlace(0)
0) if use_cuda else fluid.CPUPlace() if use_cuda else fluid.CPUPlace())
exe = fluid.Executor(place) exe = fluid.Executor(place)
fetch_list = [result, result.grad_name] fetch_list = [result, result.grad_name]
if x_stop_gradient is False: if (x_stop_gradient is False):
fetch_list.append(x.grad_name) fetch_list.append(x.grad_name)
if y_stop_gradient is False: if (y_stop_gradient is False):
fetch_list.append(y.grad_name) fetch_list.append(y.grad_name)
out = exe.run( out = exe.run(
fluid.default_main_program(), fluid.default_main_program(),
...@@ -109,13 +109,13 @@ class TestWhereAPI(unittest.TestCase): ...@@ -109,13 +109,13 @@ class TestWhereAPI(unittest.TestCase):
'y': self.y}, 'y': self.y},
fetch_list=fetch_list) fetch_list=fetch_list)
assert np.array_equal(out[0], self.out) assert np.array_equal(out[0], self.out)
if x_stop_gradient is False: if (x_stop_gradient is False):
assert np.array_equal(out[2], assert np.array_equal(out[2],
self.ref_x_backward(out[1])) self.ref_x_backward(out[1]))
if y.stop_gradient is False: if (y.stop_gradient is False):
assert np.array_equal( assert np.array_equal(
out[3], self.ref_y_backward(out[1])) out[3], self.ref_y_backward(out[1]))
elif y.stop_gradient is False: elif (y.stop_gradient is False):
assert np.array_equal(out[2], assert np.array_equal(out[2],
self.ref_y_backward(out[1])) self.ref_y_backward(out[1]))
...@@ -124,44 +124,38 @@ class TestWhereAPI(unittest.TestCase): ...@@ -124,44 +124,38 @@ class TestWhereAPI(unittest.TestCase):
with fluid.program_guard(main_program): with fluid.program_guard(main_program):
x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32')
y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32')
x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32") x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype('float32')
y_i = np.array([[1.0, 1.0, 1.0, 1.0], y_i = np.array(
[1.0, 1.0, 1.0, 1.0]]).astype("float32") [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype('float32')
result = paddle.where(x > 1, x=x, y=y) result = paddle.where((x > 1), x=x, y=y)
for use_cuda in [False, True]: for use_cuda in [False, True]:
if use_cuda and not fluid.core.is_compiled_with_cuda(): if (use_cuda and (not fluid.core.is_compiled_with_cuda())):
return return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace())
exe = fluid.Executor(place) exe = fluid.Executor(place)
out = exe.run(fluid.default_main_program(), out = exe.run(fluid.default_main_program(),
feed={'x': x_i, feed={'x': x_i,
'y': y_i}, 'y': y_i},
fetch_list=[result]) fetch_list=[result])
assert np.array_equal(out[0], np.where(x_i > 1, x_i, y_i)) assert np.array_equal(out[0], np.where((x_i > 1), x_i, y_i))
def __test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape): def __test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape):
paddle.enable_static() paddle.enable_static()
main_program = Program() main_program = Program()
with fluid.program_guard(main_program): with fluid.program_guard(main_program):
cond = fluid.layers.data( cond = fluid.layers.data(
name='cond', shape=cond_shape, dtype='bool') name='cond', shape=cond_shape, dtype='bool')
x = fluid.layers.data(name='x', shape=x_shape, dtype='float32') x = fluid.layers.data(name='x', shape=x_shape, dtype='float32')
y = fluid.layers.data(name='y', shape=y_shape, dtype='float32') y = fluid.layers.data(name='y', shape=y_shape, dtype='float32')
cond_data_tmp = np.random.random(size=cond_shape).astype('float32')
cond_data_tmp = np.random.random(size=cond_shape).astype("float32") cond_data = (cond_data_tmp < 0.3)
cond_data = cond_data_tmp < 0.3 x_data = np.random.random(size=x_shape).astype('float32')
x_data = np.random.random(size=x_shape).astype("float32") y_data = np.random.random(size=y_shape).astype('float32')
y_data = np.random.random(size=y_shape).astype("float32")
result = paddle.where(condition=cond, x=x, y=y) result = paddle.where(condition=cond, x=x, y=y)
for use_cuda in [False, True]: for use_cuda in [False, True]:
if use_cuda and not fluid.core.is_compiled_with_cuda(): if (use_cuda and (not fluid.core.is_compiled_with_cuda())):
return return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace())
exe = fluid.Executor(place) exe = fluid.Executor(place)
out = exe.run( out = exe.run(
fluid.default_main_program(), fluid.default_main_program(),
...@@ -169,9 +163,7 @@ class TestWhereAPI(unittest.TestCase): ...@@ -169,9 +163,7 @@ class TestWhereAPI(unittest.TestCase):
'x': x_data, 'x': x_data,
'y': y_data}, 'y': y_data},
fetch_list=[result]) fetch_list=[result])
expect = np.where(cond_data, x_data, y_data) expect = np.where(cond_data, x_data, y_data)
assert np.array_equal(out[0], expect) assert np.array_equal(out[0], expect)
def test_static_api_broadcast_1(self): def test_static_api_broadcast_1(self):
...@@ -198,28 +190,24 @@ class TestWhereAPI(unittest.TestCase): ...@@ -198,28 +190,24 @@ class TestWhereAPI(unittest.TestCase):
b_shape = [2, 2, 4] b_shape = [2, 2, 4]
self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape) self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)
# @Note Now, maybe not compatibility with old version
def test_static_api_broadcast_5(self): def test_static_api_broadcast_5(self):
cond_shape = [3, 2, 2, 4] cond_shape = [3, 2, 2, 4]
a_shape = [2, 2, 4] a_shape = [2, 2, 4]
b_shape = [2, 2, 4] b_shape = [2, 2, 4]
self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape) self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)
# @Note Now, maybe not compatibility with old version
def test_static_api_broadcast_6(self): def test_static_api_broadcast_6(self):
cond_shape = [2, 2, 4] cond_shape = [2, 2, 4]
a_shape = [2, 2, 1] a_shape = [2, 2, 1]
b_shape = [2, 2, 1] b_shape = [2, 2, 1]
self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape) self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)
# @Note Now, maybe not compatibility with old version
def test_static_api_broadcast_7(self): def test_static_api_broadcast_7(self):
cond_shape = [2, 2, 4] cond_shape = [2, 2, 4]
a_shape = [2, 1, 4] a_shape = [2, 1, 4]
b_shape = [2, 1, 4] b_shape = [2, 1, 4]
self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape) self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)
# @Note Now, maybe not compatibility with old version
def test_static_api_broadcast_8(self): def test_static_api_broadcast_8(self):
cond_shape = [3, 2, 2, 4] cond_shape = [3, 2, 2, 4]
a_shape = [2, 2, 1] a_shape = [2, 2, 1]
...@@ -230,9 +218,9 @@ class TestWhereAPI(unittest.TestCase): ...@@ -230,9 +218,9 @@ class TestWhereAPI(unittest.TestCase):
class TestWhereDygraphAPI(unittest.TestCase): class TestWhereDygraphAPI(unittest.TestCase):
def test_api(self): def test_api(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64')
y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float64") y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype('float64')
cond_i = np.array([False, False, True, True]).astype("bool") cond_i = np.array([False, False, True, True]).astype('bool')
x = fluid.dygraph.to_variable(x_i) x = fluid.dygraph.to_variable(x_i)
y = fluid.dygraph.to_variable(y_i) y = fluid.dygraph.to_variable(y_i)
cond = fluid.dygraph.to_variable(cond_i) cond = fluid.dygraph.to_variable(cond_i)
...@@ -242,15 +230,12 @@ class TestWhereDygraphAPI(unittest.TestCase): ...@@ -242,15 +230,12 @@ class TestWhereDygraphAPI(unittest.TestCase):
def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape): def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
cond_tmp = paddle.rand(cond_shape) cond_tmp = paddle.rand(cond_shape)
cond = cond_tmp < 0.3 cond = (cond_tmp < 0.3)
a = paddle.rand(a_shape) a = paddle.rand(a_shape)
b = paddle.rand(b_shape) b = paddle.rand(b_shape)
result = paddle.where(cond, a, b) result = paddle.where(cond, a, b)
result = result.numpy() result = result.numpy()
expect = np.where(cond, a, b) expect = np.where(cond, a, b)
self.assertTrue(np.array_equal(expect, result)) self.assertTrue(np.array_equal(expect, result))
def test_dygraph_api_broadcast_1(self): def test_dygraph_api_broadcast_1(self):
...@@ -277,28 +262,24 @@ class TestWhereDygraphAPI(unittest.TestCase): ...@@ -277,28 +262,24 @@ class TestWhereDygraphAPI(unittest.TestCase):
b_shape = [2, 2, 4] b_shape = [2, 2, 4]
self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape) self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)
# @Note Now, maybe not compatibility with old version
def test_dygraph_api_broadcast_5(self): def test_dygraph_api_broadcast_5(self):
cond_shape = [3, 2, 2, 4] cond_shape = [3, 2, 2, 4]
a_shape = [2, 2, 4] a_shape = [2, 2, 4]
b_shape = [2, 2, 4] b_shape = [2, 2, 4]
self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape) self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)
# @Note Now, maybe not compatibility with old version
def test_dygraph_api_broadcast_6(self): def test_dygraph_api_broadcast_6(self):
cond_shape = [2, 2, 4] cond_shape = [2, 2, 4]
a_shape = [2, 2, 1] a_shape = [2, 2, 1]
b_shape = [2, 2, 1] b_shape = [2, 2, 1]
self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape) self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)
# @Note Now, maybe not compatibility with old version
def test_dygraph_api_broadcast_7(self): def test_dygraph_api_broadcast_7(self):
cond_shape = [2, 2, 4] cond_shape = [2, 2, 4]
a_shape = [2, 1, 4] a_shape = [2, 1, 4]
b_shape = [2, 1, 4] b_shape = [2, 1, 4]
self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape) self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)
# @Note Now, maybe not compatibility with old version
def test_dygraph_api_broadcast_8(self): def test_dygraph_api_broadcast_8(self):
cond_shape = [3, 2, 2, 4] cond_shape = [3, 2, 2, 4]
a_shape = [2, 2, 1] a_shape = [2, 2, 1]
...@@ -308,40 +289,50 @@ class TestWhereDygraphAPI(unittest.TestCase): ...@@ -308,40 +289,50 @@ class TestWhereDygraphAPI(unittest.TestCase):
def test_where_condition(self): def test_where_condition(self):
data = np.array([[True, False], [False, True]]) data = np.array([[True, False], [False, True]])
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1, 2]) x = fluid.layers.data(name='x', shape=[(-1), 2])
y = paddle.where(x) y = paddle.where(x)
self.assertEqual(type(y), tuple) self.assertEqual(type(y), tuple)
self.assertEqual(len(y), 2) self.assertEqual(len(y), 2)
z = fluid.layers.concat(list(y), axis=1) z = fluid.layers.concat(list(y), axis=1)
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
(res, ) = exe.run(feed={'x': data},
res, = exe.run(feed={'x': data}, fetch_list=[z.name],
fetch_list=[z.name], return_numpy=False)
return_numpy=False)
expect_out = np.array([[0, 0], [1, 1]]) expect_out = np.array([[0, 0], [1, 1]])
self.assertTrue(np.allclose(expect_out, np.array(res))) self.assertTrue(np.allclose(expect_out, np.array(res)))
data = np.array([True, True, False]) data = np.array([True, True, False])
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1]) x = fluid.layers.data(name='x', shape=[(-1)])
y = paddle.where(x) y = paddle.where(x)
self.assertEqual(type(y), tuple) self.assertEqual(type(y), tuple)
self.assertEqual(len(y), 1) self.assertEqual(len(y), 1)
z = fluid.layers.concat(list(y), axis=1) z = fluid.layers.concat(list(y), axis=1)
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': data}, (res, ) = exe.run(feed={'x': data},
fetch_list=[z.name], fetch_list=[z.name],
return_numpy=False) return_numpy=False)
expect_out = np.array([[0], [1]]) expect_out = np.array([[0], [1]])
self.assertTrue(np.allclose(expect_out, np.array(res))) self.assertTrue(np.allclose(expect_out, np.array(res)))
def test_eager(self):
with _test_eager_guard():
self.test_api()
self.test_dygraph_api_broadcast_1()
self.test_dygraph_api_broadcast_2()
self.test_dygraph_api_broadcast_3()
self.test_dygraph_api_broadcast_4()
self.test_dygraph_api_broadcast_5()
self.test_dygraph_api_broadcast_6()
self.test_dygraph_api_broadcast_7()
self.test_dygraph_api_broadcast_8()
class TestWhereOpError(unittest.TestCase): class TestWhereOpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64')
y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float64") y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype('float64')
cond_i = np.array([False, False, True, True]).astype("bool") cond_i = np.array([False, False, True, True]).astype('bool')
def test_Variable(): def test_Variable():
paddle.where(cond_i, x_i, y_i) paddle.where(cond_i, x_i, y_i)
...@@ -360,10 +351,14 @@ class TestWhereOpError(unittest.TestCase): ...@@ -360,10 +351,14 @@ class TestWhereOpError(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
cond_shape = [2, 2, 4] cond_shape = [2, 2, 4]
cond_tmp = paddle.rand(cond_shape) cond_tmp = paddle.rand(cond_shape)
cond = cond_tmp < 0.3 cond = (cond_tmp < 0.3)
a = paddle.rand(cond_shape) a = paddle.rand(cond_shape)
self.assertRaises(ValueError, paddle.where, cond, a) self.assertRaises(ValueError, paddle.where, cond, a)
def test_eager(self):
with _test_eager_guard():
self.test_value_error()
if __name__ == '__main__': if (__name__ == '__main__'):
unittest.main() unittest.main()
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
...@@ -13,23 +13,22 @@ ...@@ -13,23 +13,22 @@
# limitations under the License. # limitations under the License.
from __future__ import division from __future__ import division
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from op_test import OpTest
import paddle import paddle
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
def sigmoid(x): def sigmoid(x):
return 1.0 / (1.0 + np.exp(-1.0 * x)) return (1.0 / (1.0 + np.exp(((-1.0) * x))))
def YoloBox(x, img_size, attrs): def YoloBox(x, img_size, attrs):
n, c, h, w = x.shape (n, c, h, w) = x.shape
anchors = attrs['anchors'] anchors = attrs['anchors']
an_num = int(len(anchors) // 2) an_num = int((len(anchors) // 2))
class_num = attrs['class_num'] class_num = attrs['class_num']
conf_thresh = attrs['conf_thresh'] conf_thresh = attrs['conf_thresh']
downsample = attrs['downsample'] downsample = attrs['downsample']
...@@ -37,60 +36,56 @@ def YoloBox(x, img_size, attrs): ...@@ -37,60 +36,56 @@ def YoloBox(x, img_size, attrs):
scale_x_y = attrs['scale_x_y'] scale_x_y = attrs['scale_x_y']
iou_aware = attrs['iou_aware'] iou_aware = attrs['iou_aware']
iou_aware_factor = attrs['iou_aware_factor'] iou_aware_factor = attrs['iou_aware_factor']
bias_x_y = -0.5 * (scale_x_y - 1.) bias_x_y = ((-0.5) * (scale_x_y - 1.0))
input_h = downsample * h input_h = (downsample * h)
input_w = downsample * w input_w = (downsample * w)
if iou_aware: if iou_aware:
ioup = x[:, :an_num, :, :] ioup = x[:, :an_num, :, :]
ioup = np.expand_dims(ioup, axis=-1) ioup = np.expand_dims(ioup, axis=(-1))
x = x[:, an_num:, :, :] x = x[:, an_num:, :, :]
x = x.reshape((n, an_num, 5 + class_num, h, w)).transpose((0, 1, 3, 4, 2)) x = x.reshape((n, an_num, (5 + class_num), h, w)).transpose((0, 1, 3, 4, 2))
pred_box = x[:, :, :, :, :4].copy() pred_box = x[:, :, :, :, :4].copy()
grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1)) grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1))
grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w)) grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w))
pred_box[:, :, :, :, 0] = ( pred_box[:, :, :, :, 0] = ((
grid_x + sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y + bias_x_y) / w (grid_x + (sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y)) + bias_x_y) /
pred_box[:, :, :, :, 1] = ( w)
grid_y + sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y + bias_x_y) / h pred_box[:, :, :, :, 1] = ((
(grid_y + (sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y)) + bias_x_y) /
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)] h)
anchors = [(anchors[i], anchors[(i + 1)])
for i in range(0, len(anchors), 2)]
anchors_s = np.array( anchors_s = np.array(
[(an_w / input_w, an_h / input_h) for an_w, an_h in anchors]) [((an_w / input_w), (an_h / input_h)) for (an_w, an_h) in anchors])
anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1)) anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1))
anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1)) anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1))
pred_box[:, :, :, :, 2] = np.exp(pred_box[:, :, :, :, 2]) * anchor_w pred_box[:, :, :, :, 2] = (np.exp(pred_box[:, :, :, :, 2]) * anchor_w)
pred_box[:, :, :, :, 3] = np.exp(pred_box[:, :, :, :, 3]) * anchor_h pred_box[:, :, :, :, 3] = (np.exp(pred_box[:, :, :, :, 3]) * anchor_h)
if iou_aware: if iou_aware:
pred_conf = sigmoid(x[:, :, :, :, 4:5])**( pred_conf = ((sigmoid(x[:, :, :, :, 4:5])**(1 - iou_aware_factor)) *
1 - iou_aware_factor) * sigmoid(ioup)**iou_aware_factor (sigmoid(ioup)**iou_aware_factor))
else: else:
pred_conf = sigmoid(x[:, :, :, :, 4:5]) pred_conf = sigmoid(x[:, :, :, :, 4:5])
pred_conf[pred_conf < conf_thresh] = 0. pred_conf[(pred_conf < conf_thresh)] = 0.0
pred_score = sigmoid(x[:, :, :, :, 5:]) * pred_conf pred_score = (sigmoid(x[:, :, :, :, 5:]) * pred_conf)
pred_box = pred_box * (pred_conf > 0.).astype('float32') pred_box = (pred_box * (pred_conf > 0.0).astype('float32'))
pred_box = pred_box.reshape((n, (-1), 4))
pred_box = pred_box.reshape((n, -1, 4)) (pred_box[:, :, :2], pred_box[:, :, 2:4]) = (
pred_box[:, :, :2], pred_box[:, :, 2:4] = \ (pred_box[:, :, :2] - (pred_box[:, :, 2:4] / 2.0)),
pred_box[:, :, :2] - pred_box[:, :, 2:4] / 2., \ (pred_box[:, :, :2] + (pred_box[:, :, 2:4] / 2.0)))
pred_box[:, :, :2] + pred_box[:, :, 2:4] / 2.0 pred_box[:, :, 0] = (pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis])
pred_box[:, :, 0] = pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis] pred_box[:, :, 1] = (pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis])
pred_box[:, :, 1] = pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis] pred_box[:, :, 2] = (pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis])
pred_box[:, :, 2] = pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis] pred_box[:, :, 3] = (pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis])
pred_box[:, :, 3] = pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis]
if clip_bbox: if clip_bbox:
for i in range(len(pred_box)): for i in range(len(pred_box)):
pred_box[i, :, 0] = np.clip(pred_box[i, :, 0], 0, np.inf) pred_box[i, :, 0] = np.clip(pred_box[i, :, 0], 0, np.inf)
pred_box[i, :, 1] = np.clip(pred_box[i, :, 1], 0, np.inf) pred_box[i, :, 1] = np.clip(pred_box[i, :, 1], 0, np.inf)
pred_box[i, :, 2] = np.clip(pred_box[i, :, 2], -np.inf, pred_box[i, :, 2] = np.clip(pred_box[i, :, 2], (-np.inf),
img_size[i, 1] - 1) (img_size[(i, 1)] - 1))
pred_box[i, :, 3] = np.clip(pred_box[i, :, 3], -np.inf, pred_box[i, :, 3] = np.clip(pred_box[i, :, 3], (-np.inf),
img_size[i, 0] - 1) (img_size[(i, 0)] - 1))
return (pred_box, pred_score.reshape((n, (-1), class_num)))
return pred_box, pred_score.reshape((n, -1, class_num))
class TestYoloBoxOp(OpTest): class TestYoloBoxOp(OpTest):
...@@ -99,42 +94,35 @@ class TestYoloBoxOp(OpTest): ...@@ -99,42 +94,35 @@ class TestYoloBoxOp(OpTest):
self.op_type = 'yolo_box' self.op_type = 'yolo_box'
x = np.random.random(self.x_shape).astype('float32') x = np.random.random(self.x_shape).astype('float32')
img_size = np.random.randint(10, 20, self.imgsize_shape).astype('int32') img_size = np.random.randint(10, 20, self.imgsize_shape).astype('int32')
self.attrs = { self.attrs = {
"anchors": self.anchors, 'anchors': self.anchors,
"class_num": self.class_num, 'class_num': self.class_num,
"conf_thresh": self.conf_thresh, 'conf_thresh': self.conf_thresh,
"downsample": self.downsample, 'downsample': self.downsample,
"clip_bbox": self.clip_bbox, 'clip_bbox': self.clip_bbox,
"scale_x_y": self.scale_x_y, 'scale_x_y': self.scale_x_y,
"iou_aware": self.iou_aware, 'iou_aware': self.iou_aware,
"iou_aware_factor": self.iou_aware_factor 'iou_aware_factor': self.iou_aware_factor
}
self.inputs = {
'X': x,
'ImgSize': img_size,
}
boxes, scores = YoloBox(x, img_size, self.attrs)
self.outputs = {
"Boxes": boxes,
"Scores": scores,
} }
self.inputs = {'X': x, 'ImgSize': img_size}
(boxes, scores) = YoloBox(x, img_size, self.attrs)
self.outputs = {'Boxes': boxes, 'Scores': scores}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def initTestCase(self): def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23] self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int(len(self.anchors) // 2) an_num = int((len(self.anchors) // 2))
self.batch_size = 32 self.batch_size = 32
self.class_num = 2 self.class_num = 2
self.conf_thresh = 0.5 self.conf_thresh = 0.5
self.downsample = 32 self.downsample = 32
self.clip_bbox = True self.clip_bbox = True
self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 13) self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13,
13)
self.imgsize_shape = (self.batch_size, 2) self.imgsize_shape = (self.batch_size, 2)
self.scale_x_y = 1. self.scale_x_y = 1.0
self.iou_aware = False self.iou_aware = False
self.iou_aware_factor = 0.5 self.iou_aware_factor = 0.5
...@@ -142,15 +130,16 @@ class TestYoloBoxOp(OpTest): ...@@ -142,15 +130,16 @@ class TestYoloBoxOp(OpTest):
class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): class TestYoloBoxOpNoClipBbox(TestYoloBoxOp):
def initTestCase(self): def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23] self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int(len(self.anchors) // 2) an_num = int((len(self.anchors) // 2))
self.batch_size = 32 self.batch_size = 32
self.class_num = 2 self.class_num = 2
self.conf_thresh = 0.5 self.conf_thresh = 0.5
self.downsample = 32 self.downsample = 32
self.clip_bbox = False self.clip_bbox = False
self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 13) self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13,
13)
self.imgsize_shape = (self.batch_size, 2) self.imgsize_shape = (self.batch_size, 2)
self.scale_x_y = 1. self.scale_x_y = 1.0
self.iou_aware = False self.iou_aware = False
self.iou_aware_factor = 0.5 self.iou_aware_factor = 0.5
...@@ -158,13 +147,14 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): ...@@ -158,13 +147,14 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp):
class TestYoloBoxOpScaleXY(TestYoloBoxOp): class TestYoloBoxOpScaleXY(TestYoloBoxOp):
def initTestCase(self): def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23] self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int(len(self.anchors) // 2) an_num = int((len(self.anchors) // 2))
self.batch_size = 32 self.batch_size = 32
self.class_num = 2 self.class_num = 2
self.conf_thresh = 0.5 self.conf_thresh = 0.5
self.downsample = 32 self.downsample = 32
self.clip_bbox = True self.clip_bbox = True
self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 13) self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13,
13)
self.imgsize_shape = (self.batch_size, 2) self.imgsize_shape = (self.batch_size, 2)
self.scale_x_y = 1.2 self.scale_x_y = 1.2
self.iou_aware = False self.iou_aware = False
...@@ -174,15 +164,16 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp): ...@@ -174,15 +164,16 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp):
class TestYoloBoxOpIoUAware(TestYoloBoxOp):
    """yolo_box op case with IoU-aware confidence enabled."""

    def initTestCase(self):
        # Three (w, h) anchor pairs flattened into one list.
        self.anchors = [10, 13, 16, 30, 33, 23]
        # Floor division already yields an int; no cast needed.
        an_num = len(self.anchors) // 2
        self.batch_size = 32
        self.class_num = 2
        self.conf_thresh = 0.5
        self.downsample = 32
        self.clip_bbox = True
        # IoU-aware mode adds one extra channel per anchor, hence
        # (6 + class_num) instead of (5 + class_num).
        self.x_shape = (self.batch_size, an_num * (6 + self.class_num), 13, 13)
        self.imgsize_shape = (self.batch_size, 2)
        self.scale_x_y = 1.0
        self.iou_aware = True
        self.iou_aware_factor = 0.5
...@@ -192,10 +183,9 @@ class TestYoloBoxDygraph(unittest.TestCase): ...@@ -192,10 +183,9 @@ class TestYoloBoxDygraph(unittest.TestCase):
paddle.disable_static() paddle.disable_static()
img_size = np.ones((2, 2)).astype('int32') img_size = np.ones((2, 2)).astype('int32')
img_size = paddle.to_tensor(img_size) img_size = paddle.to_tensor(img_size)
x1 = np.random.random([2, 14, 8, 8]).astype('float32') x1 = np.random.random([2, 14, 8, 8]).astype('float32')
x1 = paddle.to_tensor(x1) x1 = paddle.to_tensor(x1)
boxes, scores = paddle.vision.ops.yolo_box( (boxes, scores) = paddle.vision.ops.yolo_box(
x1, x1,
img_size=img_size, img_size=img_size,
anchors=[10, 13, 16, 30], anchors=[10, 13, 16, 30],
...@@ -203,12 +193,11 @@ class TestYoloBoxDygraph(unittest.TestCase): ...@@ -203,12 +193,11 @@ class TestYoloBoxDygraph(unittest.TestCase):
conf_thresh=0.01, conf_thresh=0.01,
downsample_ratio=8, downsample_ratio=8,
clip_bbox=True, clip_bbox=True,
scale_x_y=1.) scale_x_y=1.0)
assert boxes is not None and scores is not None assert ((boxes is not None) and (scores is not None))
x2 = np.random.random([2, 16, 8, 8]).astype('float32') x2 = np.random.random([2, 16, 8, 8]).astype('float32')
x2 = paddle.to_tensor(x2) x2 = paddle.to_tensor(x2)
boxes, scores = paddle.vision.ops.yolo_box( (boxes, scores) = paddle.vision.ops.yolo_box(
x2, x2,
img_size=img_size, img_size=img_size,
anchors=[10, 13, 16, 30], anchors=[10, 13, 16, 30],
...@@ -216,18 +205,21 @@ class TestYoloBoxDygraph(unittest.TestCase): ...@@ -216,18 +205,21 @@ class TestYoloBoxDygraph(unittest.TestCase):
conf_thresh=0.01, conf_thresh=0.01,
downsample_ratio=8, downsample_ratio=8,
clip_bbox=True, clip_bbox=True,
scale_x_y=1., scale_x_y=1.0,
iou_aware=True, iou_aware=True,
iou_aware_factor=0.5) iou_aware_factor=0.5)
paddle.enable_static() paddle.enable_static()
    def test_eager(self):
        # Re-run the dygraph checks under the experimental eager mode.
        with _test_eager_guard():
            self.test_dygraph()
class TestYoloBoxStatic(unittest.TestCase): class TestYoloBoxStatic(unittest.TestCase):
def test_static(self): def test_static(self):
x1 = paddle.static.data('x1', [2, 14, 8, 8], 'float32') x1 = paddle.static.data('x1', [2, 14, 8, 8], 'float32')
img_size = paddle.static.data('img_size', [2, 2], 'int32') img_size = paddle.static.data('img_size', [2, 2], 'int32')
(boxes, scores) = paddle.vision.ops.yolo_box(
boxes, scores = paddle.vision.ops.yolo_box(
x1, x1,
img_size=img_size, img_size=img_size,
anchors=[10, 13, 16, 30], anchors=[10, 13, 16, 30],
...@@ -235,11 +227,10 @@ class TestYoloBoxStatic(unittest.TestCase): ...@@ -235,11 +227,10 @@ class TestYoloBoxStatic(unittest.TestCase):
conf_thresh=0.01, conf_thresh=0.01,
downsample_ratio=8, downsample_ratio=8,
clip_bbox=True, clip_bbox=True,
scale_x_y=1.) scale_x_y=1.0)
assert boxes is not None and scores is not None assert ((boxes is not None) and (scores is not None))
x2 = paddle.static.data('x2', [2, 16, 8, 8], 'float32') x2 = paddle.static.data('x2', [2, 16, 8, 8], 'float32')
boxes, scores = paddle.vision.ops.yolo_box( (boxes, scores) = paddle.vision.ops.yolo_box(
x2, x2,
img_size=img_size, img_size=img_size,
anchors=[10, 13, 16, 30], anchors=[10, 13, 16, 30],
...@@ -247,27 +238,27 @@ class TestYoloBoxStatic(unittest.TestCase): ...@@ -247,27 +238,27 @@ class TestYoloBoxStatic(unittest.TestCase):
conf_thresh=0.01, conf_thresh=0.01,
downsample_ratio=8, downsample_ratio=8,
clip_bbox=True, clip_bbox=True,
scale_x_y=1., scale_x_y=1.0,
iou_aware=True, iou_aware=True,
iou_aware_factor=0.5) iou_aware_factor=0.5)
assert boxes is not None and scores is not None assert ((boxes is not None) and (scores is not None))
class TestYoloBoxOpHW(TestYoloBoxOp):
    """yolo_box op case with a non-square (13 x 9) feature map."""

    def initTestCase(self):
        # Three (w, h) anchor pairs flattened into one list.
        self.anchors = [10, 13, 16, 30, 33, 23]
        # Floor division already yields an int; no cast needed.
        an_num = len(self.anchors) // 2
        self.batch_size = 32
        self.class_num = 2
        self.conf_thresh = 0.5
        self.downsample = 32
        self.clip_bbox = False
        # Height (13) differs from width (9) to exercise the H != W path.
        self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 9)
        self.imgsize_shape = (self.batch_size, 2)
        self.scale_x_y = 1.0
        self.iou_aware = False
        self.iou_aware_factor = 0.5
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
...@@ -13,13 +13,13 @@ ...@@ -13,13 +13,13 @@
# limitations under the License. # limitations under the License.
from __future__ import print_function from __future__ import print_function
import unittest import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle import zeros_like from paddle import zeros_like
from paddle.fluid import core, Program, program_guard from paddle.fluid import core, Program, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestZerosLikeAPIError(unittest.TestCase): class TestZerosLikeAPIError(unittest.TestCase):
...@@ -28,6 +28,10 @@ class TestZerosLikeAPIError(unittest.TestCase): ...@@ -28,6 +28,10 @@ class TestZerosLikeAPIError(unittest.TestCase):
x = paddle.fluid.data('x', [3, 4]) x = paddle.fluid.data('x', [3, 4])
self.assertRaises(TypeError, zeros_like, x, 'int8') self.assertRaises(TypeError, zeros_like, x, 'int8')
    def test_eager(self):
        # Re-run the dtype-error checks under the experimental eager mode.
        with _test_eager_guard():
            self.test_errors()
class TestZerosLikeAPI(unittest.TestCase): class TestZerosLikeAPI(unittest.TestCase):
def test_api(self): def test_api(self):
...@@ -36,46 +40,48 @@ class TestZerosLikeAPI(unittest.TestCase): ...@@ -36,46 +40,48 @@ class TestZerosLikeAPI(unittest.TestCase):
train_program = Program() train_program = Program()
with program_guard(train_program, startup_program): with program_guard(train_program, startup_program):
x = paddle.fluid.data('X', shape) x = paddle.fluid.data('X', shape)
# 'bool', 'float32', 'float64', 'int32', 'int64'
out1 = zeros_like(x) out1 = zeros_like(x)
out2 = zeros_like(x, np.bool) out2 = zeros_like(x, np.bool)
out3 = zeros_like(x, 'float64') out3 = zeros_like(x, 'float64')
out4 = zeros_like(x, 'int32') out4 = zeros_like(x, 'int32')
out5 = zeros_like(x, 'int64') out5 = zeros_like(x, 'int64')
place = (fluid.CUDAPlace(0)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( if core.is_compiled_with_cuda() else fluid.CPUPlace())
) else fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
outs = exe.run(train_program, outs = exe.run(train_program,
feed={'X': np.ones(shape).astype('float32')}, feed={'X': np.ones(shape).astype('float32')},
fetch_list=[out1, out2, out3, out4, out5]) fetch_list=[out1, out2, out3, out4, out5])
for (i, dtype) in enumerate(
for i, dtype in enumerate(
[np.float32, np.bool, np.float64, np.int32, np.int64]): [np.float32, np.bool, np.float64, np.int32, np.int64]):
self.assertEqual(outs[i].dtype, dtype) self.assertEqual(outs[i].dtype, dtype)
self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True) self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True)
    def test_eager(self):
        # Re-run the static-graph API checks under the experimental
        # eager mode.
        with _test_eager_guard():
            self.test_api()
class TestZerosLikeImpeartive(unittest.TestCase):
    """Dygraph (imperative-mode) tests for the zeros_like APIs."""

    def test_out(self):
        shape = [3, 4]
        place = (fluid.CUDAPlace(0)
                 if core.is_compiled_with_cuda() else fluid.CPUPlace())
        paddle.disable_static(place)
        x = paddle.to_tensor(np.ones(shape))
        # NOTE: the `np.bool` alias was removed in NumPy 1.24; the builtin
        # `bool` is the equivalent dtype spec on every NumPy version.
        for dtype in [bool, np.float32, np.float64, np.int32, np.int64]:
            out = zeros_like(x, dtype)
            self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
                             True)
        # The aliases below reuse the last `dtype` from the loop (int64).
        out = paddle.tensor.zeros_like(x)
        self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
        out = paddle.tensor.creation.zeros_like(x)
        self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
        paddle.enable_static()

    def test_eager(self):
        # Re-run the dygraph checks under the experimental eager mode.
        with _test_eager_guard():
            self.test_out()
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
...@@ -13,56 +13,55 @@ ...@@ -13,56 +13,55 @@
# limitations under the License. # limitations under the License.
from __future__ import print_function from __future__ import print_function
import unittest import unittest
import numpy as np import numpy as np
from op_test import OpTest from op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle.fluid.op import Operator from paddle.fluid.op import Operator
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestZerosOpError(unittest.TestCase):
    """zeros op must reject dtypes outside its supported set."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            # Supported dtypes are bool/float16/float32/float64/int32/int64;
            # int8 must raise TypeError.
            shape = [4]
            dtype = 'int8'
            self.assertRaises(TypeError, fluid.layers.zeros, shape, dtype)

    def test_eager(self):
        # Re-run the dtype-error check under the experimental eager mode.
        with _test_eager_guard():
            self.test_errors()
class ApiZerosTest(unittest.TestCase): class ApiZerosTest(unittest.TestCase):
def test_out(self): def test_out(self):
with program_guard(Program()): with program_guard(Program()):
zeros = paddle.zeros(shape=[10], dtype="float64") zeros = paddle.zeros(shape=[10], dtype='float64')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[zeros]) (result, ) = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float64") expected_result = np.zeros(10, dtype='float64')
self.assertEqual((result == expected_result).all(), True) self.assertEqual((result == expected_result).all(), True)
with paddle.static.program_guard(Program()): with paddle.static.program_guard(Program()):
zeros = paddle.zeros(shape=[10], dtype="int64") zeros = paddle.zeros(shape=[10], dtype='int64')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[zeros]) (result, ) = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64") expected_result = np.zeros(10, dtype='int64')
self.assertEqual((result == expected_result).all(), True) self.assertEqual((result == expected_result).all(), True)
with program_guard(Program()): with program_guard(Program()):
zeros = paddle.zeros(shape=[10], dtype="int64") zeros = paddle.zeros(shape=[10], dtype='int64')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[zeros]) (result, ) = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64") expected_result = np.zeros(10, dtype='int64')
self.assertEqual((result == expected_result).all(), True) self.assertEqual((result == expected_result).all(), True)
with program_guard(Program()): with program_guard(Program()):
out_np = np.zeros(shape=(1), dtype='float32') out_np = np.zeros(shape=1, dtype='float32')
out = paddle.zeros(shape=[1], dtype="float32") out = paddle.zeros(shape=[1], dtype='float32')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
result = exe.run(fetch_list=[out]) result = exe.run(fetch_list=[out])
...@@ -70,28 +69,37 @@ class ApiZerosTest(unittest.TestCase): ...@@ -70,28 +69,37 @@ class ApiZerosTest(unittest.TestCase):
def test_fluid_out(self): def test_fluid_out(self):
with program_guard(Program()): with program_guard(Program()):
zeros = fluid.layers.zeros(shape=[10], dtype="int64") zeros = fluid.layers.zeros(shape=[10], dtype='int64')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[zeros]) (result, ) = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64") expected_result = np.zeros(10, dtype='int64')
self.assertEqual((result == expected_result).all(), True) self.assertEqual((result == expected_result).all(), True)
    def test_eager(self):
        # Re-run both static-graph checks under the experimental eager mode.
        with _test_eager_guard():
            self.test_out()
            self.test_fluid_out()
class ApiZerosError(unittest.TestCase):
    """Error-path tests for fluid.layers.zeros."""

    def test_errors(self):
        def test_error1():
            # `shape` must be a list/tuple/Tensor, not a bare int.
            with paddle.static.program_guard(fluid.Program()):
                ones = fluid.layers.zeros(shape=10, dtype='int64')

        self.assertRaises(TypeError, test_error1)

        def test_error2():
            # int8 is not a supported dtype for zeros.
            with paddle.static.program_guard(fluid.Program()):
                ones = fluid.layers.zeros(shape=[10], dtype='int8')

        self.assertRaises(TypeError, test_error2)

    def test_eager(self):
        # Re-run the error checks under the experimental eager mode.
        with _test_eager_guard():
            self.test_errors()
if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册