From 972299446240cda664567e48fa18d2cd22f9a998 Mon Sep 17 00:00:00 2001
From: Zhanlue Yang
Date: Mon, 14 Feb 2022 09:57:04 +0800
Subject: [PATCH] Fixed get_tensor method for EagerTensor (#39414)

* Enabled Eager OpTest #1

* Enabled Eager OpTest #1

* Fixed get_tensor method for EagerTensor
---
 paddle/fluid/pybind/eager_utils.cc | 2 +-
 .../fluid/tests/unittests/test_where_op.py | 155 +++++++--------
 .../fluid/tests/unittests/test_yolo_box_op.py | 185 +++++++++---------
 .../tests/unittests/test_zeros_like_op.py | 40 ++--
 .../fluid/tests/unittests/test_zeros_op.py | 64 +++---
 5 files changed, 223 insertions(+), 223 deletions(-)

diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 1a6bd9f35aa..85a39710564 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -506,7 +506,7 @@ PyObject* ToPyObject(const paddle::framework::proto::VarType& type) {
 }
 
 PyObject* ToPyObject(const paddle::framework::LoDTensor* value) {
-  auto obj = ::pybind11::cast(value, py::return_value_policy::copy);
+  auto obj = ::pybind11::cast(value, py::return_value_policy::reference);
   obj.inc_ref();
   return obj.ptr();
 }
diff --git a/python/paddle/fluid/tests/unittests/test_where_op.py b/python/paddle/fluid/tests/unittests/test_where_op.py
index 5b92fcf52de..d601117b96f 100644
--- a/python/paddle/fluid/tests/unittests/test_where_op.py
+++ b/python/paddle/fluid/tests/unittests/test_where_op.py
@@ -1,11 +1,11 @@
-#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-#
+#
 # http://www.apache.org/licenses/LICENSE-2.0
-#
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -13,7 +13,6 @@
 # limitations under the License.
from __future__ import print_function - import unittest import numpy as np import paddle @@ -24,38 +23,39 @@ from op_test import OpTest from paddle.fluid import compiler, Program, program_guard from paddle.fluid.op import Operator from paddle.fluid.backward import append_backward +from paddle.fluid.framework import _test_eager_guard class TestWhereOp(OpTest): def setUp(self): - self.op_type = "where" + self.op_type = 'where' self.init_config() self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y} self.outputs = {'Out': np.where(self.cond, self.x, self.y)} def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X', 'Y'], 'Out') + self.check_grad(['X', 'Y'], 'Out', check_eager=True) def init_config(self): - self.x = np.random.uniform(-3, 5, (100)).astype("float64") - self.y = np.random.uniform(-3, 5, (100)).astype("float64") - self.cond = np.zeros((100)).astype("bool") + self.x = np.random.uniform((-3), 5, 100).astype('float64') + self.y = np.random.uniform((-3), 5, 100).astype('float64') + self.cond = np.zeros(100).astype('bool') class TestWhereOp2(TestWhereOp): def init_config(self): - self.x = np.random.uniform(-5, 5, (60, 2)).astype("float64") - self.y = np.random.uniform(-5, 5, (60, 2)).astype("float64") - self.cond = np.ones((60, 2)).astype("bool") + self.x = np.random.uniform((-5), 5, (60, 2)).astype('float64') + self.y = np.random.uniform((-5), 5, (60, 2)).astype('float64') + self.cond = np.ones((60, 2)).astype('bool') class TestWhereOp3(TestWhereOp): def init_config(self): - self.x = np.random.uniform(-3, 5, (20, 2, 4)).astype("float64") - self.y = np.random.uniform(-3, 5, (20, 2, 4)).astype("float64") + self.x = np.random.uniform((-3), 5, (20, 2, 4)).astype('float64') + self.y = np.random.uniform((-3), 5, (20, 2, 4)).astype('float64') self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool) @@ -66,15 +66,15 @@ class TestWhereAPI(unittest.TestCase): def init_data(self): self.shape = [10, 15] self.cond = np.array(np.random.randint(2, size=self.shape), dtype=bool) - self.x = np.random.uniform(-2, 3, self.shape).astype(np.float32) - self.y = np.random.uniform(-2, 3, self.shape).astype(np.float32) + self.x = np.random.uniform((-2), 3, self.shape).astype(np.float32) + self.y = np.random.uniform((-2), 3, self.shape).astype(np.float32) self.out = np.where(self.cond, self.x, self.y) def ref_x_backward(self, dout): - return np.where(self.cond == True, dout, 0) + return np.where((self.cond == True), dout, 0) def ref_y_backward(self, dout): - return np.where(self.cond == False, dout, 0) + return np.where((self.cond == False), dout, 0) def test_api(self, use_cuda=False): for x_stop_gradient in [False, True]: @@ -90,17 +90,17 @@ class TestWhereAPI(unittest.TestCase): y.stop_gradient = y_stop_gradient result = paddle.where(cond, x, y) append_backward(layers.mean(result)) - for use_cuda in [False, True]: - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if (use_cuda and + (not fluid.core.is_compiled_with_cuda())): break - place = fluid.CUDAPlace( - 0) if use_cuda else fluid.CPUPlace() + place = (fluid.CUDAPlace(0) + if use_cuda else fluid.CPUPlace()) exe = fluid.Executor(place) fetch_list = [result, result.grad_name] - if x_stop_gradient is False: + if (x_stop_gradient is False): fetch_list.append(x.grad_name) - if y_stop_gradient is False: + if (y_stop_gradient is False): fetch_list.append(y.grad_name) out = exe.run( fluid.default_main_program(), @@ -109,13 +109,13 @@ class 
TestWhereAPI(unittest.TestCase): 'y': self.y}, fetch_list=fetch_list) assert np.array_equal(out[0], self.out) - if x_stop_gradient is False: + if (x_stop_gradient is False): assert np.array_equal(out[2], self.ref_x_backward(out[1])) - if y.stop_gradient is False: + if (y.stop_gradient is False): assert np.array_equal( out[3], self.ref_y_backward(out[1])) - elif y.stop_gradient is False: + elif (y.stop_gradient is False): assert np.array_equal(out[2], self.ref_y_backward(out[1])) @@ -124,44 +124,38 @@ class TestWhereAPI(unittest.TestCase): with fluid.program_guard(main_program): x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') - x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32") - y_i = np.array([[1.0, 1.0, 1.0, 1.0], - [1.0, 1.0, 1.0, 1.0]]).astype("float32") - result = paddle.where(x > 1, x=x, y=y) - + x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype('float32') + y_i = np.array( + [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype('float32') + result = paddle.where((x > 1), x=x, y=y) for use_cuda in [False, True]: - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if (use_cuda and (not fluid.core.is_compiled_with_cuda())): return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()) exe = fluid.Executor(place) out = exe.run(fluid.default_main_program(), feed={'x': x_i, 'y': y_i}, fetch_list=[result]) - assert np.array_equal(out[0], np.where(x_i > 1, x_i, y_i)) + assert np.array_equal(out[0], np.where((x_i > 1), x_i, y_i)) def __test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape): paddle.enable_static() - main_program = Program() with fluid.program_guard(main_program): cond = fluid.layers.data( name='cond', shape=cond_shape, dtype='bool') x = fluid.layers.data(name='x', shape=x_shape, dtype='float32') y = fluid.layers.data(name='y', shape=y_shape, dtype='float32') - - cond_data_tmp = np.random.random(size=cond_shape).astype("float32") - cond_data = cond_data_tmp < 0.3 - x_data = np.random.random(size=x_shape).astype("float32") - y_data = np.random.random(size=y_shape).astype("float32") - + cond_data_tmp = np.random.random(size=cond_shape).astype('float32') + cond_data = (cond_data_tmp < 0.3) + x_data = np.random.random(size=x_shape).astype('float32') + y_data = np.random.random(size=y_shape).astype('float32') result = paddle.where(condition=cond, x=x, y=y) - for use_cuda in [False, True]: - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if (use_cuda and (not fluid.core.is_compiled_with_cuda())): return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - + place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()) exe = fluid.Executor(place) out = exe.run( fluid.default_main_program(), @@ -169,9 +163,7 @@ class TestWhereAPI(unittest.TestCase): 'x': x_data, 'y': y_data}, fetch_list=[result]) - expect = np.where(cond_data, x_data, y_data) - assert np.array_equal(out[0], expect) def test_static_api_broadcast_1(self): @@ -198,28 +190,24 @@ class TestWhereAPI(unittest.TestCase): b_shape = [2, 2, 4] self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape) - # @Note Now, maybe not compatibility with old version def test_static_api_broadcast_5(self): cond_shape = [3, 2, 2, 4] a_shape = [2, 2, 4] b_shape = [2, 2, 4] self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape) - # @Note Now, maybe not compatibility with old version def 
test_static_api_broadcast_6(self): cond_shape = [2, 2, 4] a_shape = [2, 2, 1] b_shape = [2, 2, 1] self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape) - # @Note Now, maybe not compatibility with old version def test_static_api_broadcast_7(self): cond_shape = [2, 2, 4] a_shape = [2, 1, 4] b_shape = [2, 1, 4] self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape) - # @Note Now, maybe not compatibility with old version def test_static_api_broadcast_8(self): cond_shape = [3, 2, 2, 4] a_shape = [2, 2, 1] @@ -230,9 +218,9 @@ class TestWhereAPI(unittest.TestCase): class TestWhereDygraphAPI(unittest.TestCase): def test_api(self): with fluid.dygraph.guard(): - x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") - y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float64") - cond_i = np.array([False, False, True, True]).astype("bool") + x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64') + y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype('float64') + cond_i = np.array([False, False, True, True]).astype('bool') x = fluid.dygraph.to_variable(x_i) y = fluid.dygraph.to_variable(y_i) cond = fluid.dygraph.to_variable(cond_i) @@ -242,15 +230,12 @@ class TestWhereDygraphAPI(unittest.TestCase): def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape): with fluid.dygraph.guard(): cond_tmp = paddle.rand(cond_shape) - cond = cond_tmp < 0.3 + cond = (cond_tmp < 0.3) a = paddle.rand(a_shape) b = paddle.rand(b_shape) - result = paddle.where(cond, a, b) result = result.numpy() - expect = np.where(cond, a, b) - self.assertTrue(np.array_equal(expect, result)) def test_dygraph_api_broadcast_1(self): @@ -277,28 +262,24 @@ class TestWhereDygraphAPI(unittest.TestCase): b_shape = [2, 2, 4] self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape) - # @Note Now, maybe not compatibility with old version def test_dygraph_api_broadcast_5(self): cond_shape = [3, 2, 2, 4] a_shape = [2, 2, 4] b_shape = [2, 2, 4] self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape) - # @Note Now, maybe not compatibility with old version def test_dygraph_api_broadcast_6(self): cond_shape = [2, 2, 4] a_shape = [2, 2, 1] b_shape = [2, 2, 1] self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape) - # @Note Now, maybe not compatibility with old version def test_dygraph_api_broadcast_7(self): cond_shape = [2, 2, 4] a_shape = [2, 1, 4] b_shape = [2, 1, 4] self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape) - # @Note Now, maybe not compatibility with old version def test_dygraph_api_broadcast_8(self): cond_shape = [3, 2, 2, 4] a_shape = [2, 2, 1] @@ -308,40 +289,50 @@ class TestWhereDygraphAPI(unittest.TestCase): def test_where_condition(self): data = np.array([[True, False], [False, True]]) with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 2]) + x = fluid.layers.data(name='x', shape=[(-1), 2]) y = paddle.where(x) self.assertEqual(type(y), tuple) self.assertEqual(len(y), 2) z = fluid.layers.concat(list(y), axis=1) exe = fluid.Executor(fluid.CPUPlace()) - - res, = exe.run(feed={'x': data}, - fetch_list=[z.name], - return_numpy=False) + (res, ) = exe.run(feed={'x': data}, + fetch_list=[z.name], + return_numpy=False) expect_out = np.array([[0, 0], [1, 1]]) self.assertTrue(np.allclose(expect_out, np.array(res))) - data = np.array([True, True, False]) with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1]) + x = fluid.layers.data(name='x', shape=[(-1)]) y = 
paddle.where(x) self.assertEqual(type(y), tuple) self.assertEqual(len(y), 1) z = fluid.layers.concat(list(y), axis=1) exe = fluid.Executor(fluid.CPUPlace()) - res, = exe.run(feed={'x': data}, - fetch_list=[z.name], - return_numpy=False) + (res, ) = exe.run(feed={'x': data}, + fetch_list=[z.name], + return_numpy=False) expect_out = np.array([[0], [1]]) self.assertTrue(np.allclose(expect_out, np.array(res))) + def test_eager(self): + with _test_eager_guard(): + self.test_api() + self.test_dygraph_api_broadcast_1() + self.test_dygraph_api_broadcast_2() + self.test_dygraph_api_broadcast_3() + self.test_dygraph_api_broadcast_4() + self.test_dygraph_api_broadcast_5() + self.test_dygraph_api_broadcast_6() + self.test_dygraph_api_broadcast_7() + self.test_dygraph_api_broadcast_8() + class TestWhereOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") - y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float64") - cond_i = np.array([False, False, True, True]).astype("bool") + x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64') + y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype('float64') + cond_i = np.array([False, False, True, True]).astype('bool') def test_Variable(): paddle.where(cond_i, x_i, y_i) @@ -360,10 +351,14 @@ class TestWhereOpError(unittest.TestCase): with fluid.dygraph.guard(): cond_shape = [2, 2, 4] cond_tmp = paddle.rand(cond_shape) - cond = cond_tmp < 0.3 + cond = (cond_tmp < 0.3) a = paddle.rand(cond_shape) self.assertRaises(ValueError, paddle.where, cond, a) + def test_eager(self): + with _test_eager_guard(): + self.test_value_error() + -if __name__ == '__main__': +if (__name__ == '__main__'): unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_yolo_box_op.py b/python/paddle/fluid/tests/unittests/test_yolo_box_op.py index 5793f0148fc..043c5c1651a 100644 --- a/python/paddle/fluid/tests/unittests/test_yolo_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_yolo_box_op.py @@ -1,11 +1,11 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -13,23 +13,22 @@ # limitations under the License. from __future__ import division - import unittest import numpy as np from op_test import OpTest - import paddle from paddle.fluid import core +from paddle.fluid.framework import _test_eager_guard def sigmoid(x): - return 1.0 / (1.0 + np.exp(-1.0 * x)) + return (1.0 / (1.0 + np.exp(((-1.0) * x)))) def YoloBox(x, img_size, attrs): - n, c, h, w = x.shape + (n, c, h, w) = x.shape anchors = attrs['anchors'] - an_num = int(len(anchors) // 2) + an_num = int((len(anchors) // 2)) class_num = attrs['class_num'] conf_thresh = attrs['conf_thresh'] downsample = attrs['downsample'] @@ -37,60 +36,56 @@ def YoloBox(x, img_size, attrs): scale_x_y = attrs['scale_x_y'] iou_aware = attrs['iou_aware'] iou_aware_factor = attrs['iou_aware_factor'] - bias_x_y = -0.5 * (scale_x_y - 1.) 
- input_h = downsample * h - input_w = downsample * w - + bias_x_y = ((-0.5) * (scale_x_y - 1.0)) + input_h = (downsample * h) + input_w = (downsample * w) if iou_aware: ioup = x[:, :an_num, :, :] - ioup = np.expand_dims(ioup, axis=-1) + ioup = np.expand_dims(ioup, axis=(-1)) x = x[:, an_num:, :, :] - x = x.reshape((n, an_num, 5 + class_num, h, w)).transpose((0, 1, 3, 4, 2)) - + x = x.reshape((n, an_num, (5 + class_num), h, w)).transpose((0, 1, 3, 4, 2)) pred_box = x[:, :, :, :, :4].copy() grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1)) grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w)) - pred_box[:, :, :, :, 0] = ( - grid_x + sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y + bias_x_y) / w - pred_box[:, :, :, :, 1] = ( - grid_y + sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y + bias_x_y) / h - - anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)] + pred_box[:, :, :, :, 0] = (( + (grid_x + (sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y)) + bias_x_y) / + w) + pred_box[:, :, :, :, 1] = (( + (grid_y + (sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y)) + bias_x_y) / + h) + anchors = [(anchors[i], anchors[(i + 1)]) + for i in range(0, len(anchors), 2)] anchors_s = np.array( - [(an_w / input_w, an_h / input_h) for an_w, an_h in anchors]) + [((an_w / input_w), (an_h / input_h)) for (an_w, an_h) in anchors]) anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1)) anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1)) - pred_box[:, :, :, :, 2] = np.exp(pred_box[:, :, :, :, 2]) * anchor_w - pred_box[:, :, :, :, 3] = np.exp(pred_box[:, :, :, :, 3]) * anchor_h - + pred_box[:, :, :, :, 2] = (np.exp(pred_box[:, :, :, :, 2]) * anchor_w) + pred_box[:, :, :, :, 3] = (np.exp(pred_box[:, :, :, :, 3]) * anchor_h) if iou_aware: - pred_conf = sigmoid(x[:, :, :, :, 4:5])**( - 1 - iou_aware_factor) * sigmoid(ioup)**iou_aware_factor + pred_conf = ((sigmoid(x[:, :, :, :, 4:5])**(1 - iou_aware_factor)) * + (sigmoid(ioup)**iou_aware_factor)) else: pred_conf = sigmoid(x[:, :, :, :, 4:5]) - pred_conf[pred_conf < conf_thresh] = 0. 
- pred_score = sigmoid(x[:, :, :, :, 5:]) * pred_conf - pred_box = pred_box * (pred_conf > 0.).astype('float32') - - pred_box = pred_box.reshape((n, -1, 4)) - pred_box[:, :, :2], pred_box[:, :, 2:4] = \ - pred_box[:, :, :2] - pred_box[:, :, 2:4] / 2., \ - pred_box[:, :, :2] + pred_box[:, :, 2:4] / 2.0 - pred_box[:, :, 0] = pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis] - pred_box[:, :, 1] = pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis] - pred_box[:, :, 2] = pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis] - pred_box[:, :, 3] = pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis] - + pred_conf[(pred_conf < conf_thresh)] = 0.0 + pred_score = (sigmoid(x[:, :, :, :, 5:]) * pred_conf) + pred_box = (pred_box * (pred_conf > 0.0).astype('float32')) + pred_box = pred_box.reshape((n, (-1), 4)) + (pred_box[:, :, :2], pred_box[:, :, 2:4]) = ( + (pred_box[:, :, :2] - (pred_box[:, :, 2:4] / 2.0)), + (pred_box[:, :, :2] + (pred_box[:, :, 2:4] / 2.0))) + pred_box[:, :, 0] = (pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis]) + pred_box[:, :, 1] = (pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis]) + pred_box[:, :, 2] = (pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis]) + pred_box[:, :, 3] = (pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis]) if clip_bbox: for i in range(len(pred_box)): pred_box[i, :, 0] = np.clip(pred_box[i, :, 0], 0, np.inf) pred_box[i, :, 1] = np.clip(pred_box[i, :, 1], 0, np.inf) - pred_box[i, :, 2] = np.clip(pred_box[i, :, 2], -np.inf, - img_size[i, 1] - 1) - pred_box[i, :, 3] = np.clip(pred_box[i, :, 3], -np.inf, - img_size[i, 0] - 1) - - return pred_box, pred_score.reshape((n, -1, class_num)) + pred_box[i, :, 2] = np.clip(pred_box[i, :, 2], (-np.inf), + (img_size[(i, 1)] - 1)) + pred_box[i, :, 3] = np.clip(pred_box[i, :, 3], (-np.inf), + (img_size[(i, 0)] - 1)) + return (pred_box, pred_score.reshape((n, (-1), class_num))) class TestYoloBoxOp(OpTest): @@ -99,42 +94,35 @@ class TestYoloBoxOp(OpTest): self.op_type = 'yolo_box' x = np.random.random(self.x_shape).astype('float32') img_size = np.random.randint(10, 20, self.imgsize_shape).astype('int32') - self.attrs = { - "anchors": self.anchors, - "class_num": self.class_num, - "conf_thresh": self.conf_thresh, - "downsample": self.downsample, - "clip_bbox": self.clip_bbox, - "scale_x_y": self.scale_x_y, - "iou_aware": self.iou_aware, - "iou_aware_factor": self.iou_aware_factor - } - - self.inputs = { - 'X': x, - 'ImgSize': img_size, - } - boxes, scores = YoloBox(x, img_size, self.attrs) - self.outputs = { - "Boxes": boxes, - "Scores": scores, + 'anchors': self.anchors, + 'class_num': self.class_num, + 'conf_thresh': self.conf_thresh, + 'downsample': self.downsample, + 'clip_bbox': self.clip_bbox, + 'scale_x_y': self.scale_x_y, + 'iou_aware': self.iou_aware, + 'iou_aware_factor': self.iou_aware_factor } + self.inputs = {'X': x, 'ImgSize': img_size} + (boxes, scores) = YoloBox(x, img_size, self.attrs) + self.outputs = {'Boxes': boxes, 'Scores': scores} def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] - an_num = int(len(self.anchors) // 2) + an_num = int((len(self.anchors) // 2)) self.batch_size = 32 self.class_num = 2 self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 13) + self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, + 13) self.imgsize_shape = (self.batch_size, 2) - self.scale_x_y = 1. 
+ self.scale_x_y = 1.0 self.iou_aware = False self.iou_aware_factor = 0.5 @@ -142,15 +130,16 @@ class TestYoloBoxOp(OpTest): class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] - an_num = int(len(self.anchors) // 2) + an_num = int((len(self.anchors) // 2)) self.batch_size = 32 self.class_num = 2 self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = False - self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 13) + self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, + 13) self.imgsize_shape = (self.batch_size, 2) - self.scale_x_y = 1. + self.scale_x_y = 1.0 self.iou_aware = False self.iou_aware_factor = 0.5 @@ -158,13 +147,14 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): class TestYoloBoxOpScaleXY(TestYoloBoxOp): def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] - an_num = int(len(self.anchors) // 2) + an_num = int((len(self.anchors) // 2)) self.batch_size = 32 self.class_num = 2 self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 13) + self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, + 13) self.imgsize_shape = (self.batch_size, 2) self.scale_x_y = 1.2 self.iou_aware = False @@ -174,15 +164,16 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp): class TestYoloBoxOpIoUAware(TestYoloBoxOp): def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] - an_num = int(len(self.anchors) // 2) + an_num = int((len(self.anchors) // 2)) self.batch_size = 32 self.class_num = 2 self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = True - self.x_shape = (self.batch_size, an_num * (6 + self.class_num), 13, 13) + self.x_shape = (self.batch_size, (an_num * (6 + self.class_num)), 13, + 13) self.imgsize_shape = (self.batch_size, 2) - self.scale_x_y = 1. + self.scale_x_y = 1.0 self.iou_aware = True self.iou_aware_factor = 0.5 @@ -192,10 +183,9 @@ class TestYoloBoxDygraph(unittest.TestCase): paddle.disable_static() img_size = np.ones((2, 2)).astype('int32') img_size = paddle.to_tensor(img_size) - x1 = np.random.random([2, 14, 8, 8]).astype('float32') x1 = paddle.to_tensor(x1) - boxes, scores = paddle.vision.ops.yolo_box( + (boxes, scores) = paddle.vision.ops.yolo_box( x1, img_size=img_size, anchors=[10, 13, 16, 30], @@ -203,12 +193,11 @@ class TestYoloBoxDygraph(unittest.TestCase): conf_thresh=0.01, downsample_ratio=8, clip_bbox=True, - scale_x_y=1.) 
- assert boxes is not None and scores is not None - + scale_x_y=1.0) + assert ((boxes is not None) and (scores is not None)) x2 = np.random.random([2, 16, 8, 8]).astype('float32') x2 = paddle.to_tensor(x2) - boxes, scores = paddle.vision.ops.yolo_box( + (boxes, scores) = paddle.vision.ops.yolo_box( x2, img_size=img_size, anchors=[10, 13, 16, 30], @@ -216,18 +205,21 @@ class TestYoloBoxDygraph(unittest.TestCase): conf_thresh=0.01, downsample_ratio=8, clip_bbox=True, - scale_x_y=1., + scale_x_y=1.0, iou_aware=True, iou_aware_factor=0.5) paddle.enable_static() + def test_eager(self): + with _test_eager_guard(): + self.test_dygraph() + class TestYoloBoxStatic(unittest.TestCase): def test_static(self): x1 = paddle.static.data('x1', [2, 14, 8, 8], 'float32') img_size = paddle.static.data('img_size', [2, 2], 'int32') - - boxes, scores = paddle.vision.ops.yolo_box( + (boxes, scores) = paddle.vision.ops.yolo_box( x1, img_size=img_size, anchors=[10, 13, 16, 30], @@ -235,11 +227,10 @@ class TestYoloBoxStatic(unittest.TestCase): conf_thresh=0.01, downsample_ratio=8, clip_bbox=True, - scale_x_y=1.) - assert boxes is not None and scores is not None - + scale_x_y=1.0) + assert ((boxes is not None) and (scores is not None)) x2 = paddle.static.data('x2', [2, 16, 8, 8], 'float32') - boxes, scores = paddle.vision.ops.yolo_box( + (boxes, scores) = paddle.vision.ops.yolo_box( x2, img_size=img_size, anchors=[10, 13, 16, 30], @@ -247,27 +238,27 @@ class TestYoloBoxStatic(unittest.TestCase): conf_thresh=0.01, downsample_ratio=8, clip_bbox=True, - scale_x_y=1., + scale_x_y=1.0, iou_aware=True, iou_aware_factor=0.5) - assert boxes is not None and scores is not None + assert ((boxes is not None) and (scores is not None)) class TestYoloBoxOpHW(TestYoloBoxOp): def initTestCase(self): self.anchors = [10, 13, 16, 30, 33, 23] - an_num = int(len(self.anchors) // 2) + an_num = int((len(self.anchors) // 2)) self.batch_size = 32 self.class_num = 2 self.conf_thresh = 0.5 self.downsample = 32 self.clip_bbox = False - self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 9) + self.x_shape = (self.batch_size, (an_num * (5 + self.class_num)), 13, 9) self.imgsize_shape = (self.batch_size, 2) - self.scale_x_y = 1. + self.scale_x_y = 1.0 self.iou_aware = False self.iou_aware_factor = 0.5 -if __name__ == "__main__": +if (__name__ == '__main__'): unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py index 6546d7b99f4..80b4db793ff 100644 --- a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py @@ -1,11 +1,11 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -13,13 +13,13 @@ # limitations under the License. 
from __future__ import print_function - import unittest import numpy as np import paddle import paddle.fluid as fluid from paddle import zeros_like from paddle.fluid import core, Program, program_guard +from paddle.fluid.framework import _test_eager_guard class TestZerosLikeAPIError(unittest.TestCase): @@ -28,6 +28,10 @@ class TestZerosLikeAPIError(unittest.TestCase): x = paddle.fluid.data('x', [3, 4]) self.assertRaises(TypeError, zeros_like, x, 'int8') + def test_eager(self): + with _test_eager_guard(): + self.test_errors() + class TestZerosLikeAPI(unittest.TestCase): def test_api(self): @@ -36,46 +40,48 @@ class TestZerosLikeAPI(unittest.TestCase): train_program = Program() with program_guard(train_program, startup_program): x = paddle.fluid.data('X', shape) - - # 'bool', 'float32', 'float64', 'int32', 'int64' out1 = zeros_like(x) out2 = zeros_like(x, np.bool) out3 = zeros_like(x, 'float64') out4 = zeros_like(x, 'int32') out5 = zeros_like(x, 'int64') - - place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( - ) else fluid.CPUPlace() + place = (fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() else fluid.CPUPlace()) exe = fluid.Executor(place) outs = exe.run(train_program, feed={'X': np.ones(shape).astype('float32')}, fetch_list=[out1, out2, out3, out4, out5]) - - for i, dtype in enumerate( + for (i, dtype) in enumerate( [np.float32, np.bool, np.float64, np.int32, np.int64]): self.assertEqual(outs[i].dtype, dtype) self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True) + def test_eager(self): + with _test_eager_guard(): + self.test_api() + class TestZerosLikeImpeartive(unittest.TestCase): def test_out(self): shape = [3, 4] - place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( - ) else fluid.CPUPlace() + place = (fluid.CUDAPlace(0) + if core.is_compiled_with_cuda() else fluid.CPUPlace()) paddle.disable_static(place) x = paddle.to_tensor(np.ones(shape)) for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]: out = zeros_like(x, dtype) self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True) - out = paddle.tensor.zeros_like(x) self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True) - out = paddle.tensor.creation.zeros_like(x) self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True) paddle.enable_static() + def test_eager(self): + with _test_eager_guard(): + self.test_out() + -if __name__ == "__main__": +if (__name__ == '__main__'): unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_zeros_op.py b/python/paddle/fluid/tests/unittests/test_zeros_op.py index 23dec935507..449f95aac29 100644 --- a/python/paddle/fluid/tests/unittests/test_zeros_op.py +++ b/python/paddle/fluid/tests/unittests/test_zeros_op.py @@ -1,11 +1,11 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -13,56 +13,55 @@ # limitations under the License. 
from __future__ import print_function - import unittest import numpy as np from op_test import OpTest - import paddle import paddle.fluid.core as core from paddle.fluid.op import Operator import paddle.fluid as fluid from paddle.fluid import compiler, Program, program_guard +from paddle.fluid.framework import _test_eager_guard class TestZerosOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - # The input dtype of zeros_op must be bool, float16, float32, float64, int32, int64. shape = [4] - dtype = "int8" + dtype = 'int8' self.assertRaises(TypeError, fluid.layers.zeros, shape, dtype) + def test_eager(self): + with _test_eager_guard(): + self.test_errors() + class ApiZerosTest(unittest.TestCase): def test_out(self): with program_guard(Program()): - zeros = paddle.zeros(shape=[10], dtype="float64") + zeros = paddle.zeros(shape=[10], dtype='float64') place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[zeros]) - expected_result = np.zeros(10, dtype="float64") + (result, ) = exe.run(fetch_list=[zeros]) + expected_result = np.zeros(10, dtype='float64') self.assertEqual((result == expected_result).all(), True) - with paddle.static.program_guard(Program()): - zeros = paddle.zeros(shape=[10], dtype="int64") + zeros = paddle.zeros(shape=[10], dtype='int64') place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[zeros]) - expected_result = np.zeros(10, dtype="int64") + (result, ) = exe.run(fetch_list=[zeros]) + expected_result = np.zeros(10, dtype='int64') self.assertEqual((result == expected_result).all(), True) - with program_guard(Program()): - zeros = paddle.zeros(shape=[10], dtype="int64") + zeros = paddle.zeros(shape=[10], dtype='int64') place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[zeros]) - expected_result = np.zeros(10, dtype="int64") + (result, ) = exe.run(fetch_list=[zeros]) + expected_result = np.zeros(10, dtype='int64') self.assertEqual((result == expected_result).all(), True) - with program_guard(Program()): - out_np = np.zeros(shape=(1), dtype='float32') - out = paddle.zeros(shape=[1], dtype="float32") + out_np = np.zeros(shape=1, dtype='float32') + out = paddle.zeros(shape=[1], dtype='float32') place = paddle.CPUPlace() exe = paddle.static.Executor(place) result = exe.run(fetch_list=[out]) @@ -70,28 +69,37 @@ class ApiZerosTest(unittest.TestCase): def test_fluid_out(self): with program_guard(Program()): - zeros = fluid.layers.zeros(shape=[10], dtype="int64") + zeros = fluid.layers.zeros(shape=[10], dtype='int64') place = paddle.CPUPlace() exe = paddle.static.Executor(place) - result, = exe.run(fetch_list=[zeros]) - expected_result = np.zeros(10, dtype="int64") + (result, ) = exe.run(fetch_list=[zeros]) + expected_result = np.zeros(10, dtype='int64') self.assertEqual((result == expected_result).all(), True) + def test_eager(self): + with _test_eager_guard(): + self.test_out() + self.test_fluid_out() + class ApiZerosError(unittest.TestCase): def test_errors(self): def test_error1(): with paddle.static.program_guard(fluid.Program()): - ones = fluid.layers.zeros(shape=10, dtype="int64") + ones = fluid.layers.zeros(shape=10, dtype='int64') self.assertRaises(TypeError, test_error1) def test_error2(): with paddle.static.program_guard(fluid.Program()): - ones = fluid.layers.zeros(shape=[10], dtype="int8") + ones = fluid.layers.zeros(shape=[10], dtype='int8') self.assertRaises(TypeError, test_error2) + def test_eager(self): + 
+        with _test_eager_guard():
+            self.test_errors()
+
 
-if __name__ == "__main__":
+if (__name__ == '__main__'):
     unittest.main()
--
GitLab
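
The functional change in this patch is the single line in paddle/fluid/pybind/eager_utils.cc: ToPyObject(const paddle::framework::LoDTensor*) now casts with py::return_value_policy::reference instead of py::return_value_policy::copy, so the object handed back to Python (per the commit subject, this is what EagerTensor's get_tensor ultimately returns through) wraps the live LoDTensor rather than an independent copy; the test-file hunks are mechanical reformatting plus check_eager=True and _test_eager_guard coverage. Presumably the point is that reads and writes made through the returned LoDTensor must be visible to the original eager tensor. The sketch below is a minimal, self-contained pybind11 module, not Paddle code (the Buffer, policy_demo, get_copy, get_reference, and global_buffer names are invented for illustration), showing how the two policies differ for a function that returns a raw pointer.

// Minimal pybind11 sketch (not Paddle code): return_value_policy::copy
// versus return_value_policy::reference for a raw-pointer return value.
#include <pybind11/pybind11.h>

namespace py = pybind11;

struct Buffer {
  int value = 0;
};

static Buffer global_buffer;  // owned by C++, lives for the whole process

PYBIND11_MODULE(policy_demo, m) {
  py::class_<Buffer>(m, "Buffer").def_readwrite("value", &Buffer::value);

  // copy: Python receives an independent Buffer; writing buf.value from
  // Python never reaches global_buffer, and later C++-side changes to
  // global_buffer are not observed through the returned object.
  m.def("get_copy", []() -> Buffer* { return &global_buffer; },
        py::return_value_policy::copy);

  // reference: Python wraps global_buffer itself without taking ownership;
  // reads and writes through the returned object alias the C++ instance.
  m.def("get_reference", []() -> Buffer* { return &global_buffer; },
        py::return_value_policy::reference);
}

With reference, pybind11 takes no ownership, so the caller must guarantee that the C++ object outlives the Python handle; in the patched function that lifetime is managed on the Paddle side, which is presumably why a non-owning policy is the appropriate choice here.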