# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from decorator_helper import prog_scope

import paddle
import paddle.fluid as fluid
import paddle.nn.functional as F

fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})

unary_api_list = [
    paddle.nn.functional.elu,
    paddle.nn.functional.gelu,
    paddle.nn.functional.hardsigmoid,
    paddle.nn.functional.hardswish,
    paddle.nn.functional.hardshrink,
    paddle.nn.functional.hardtanh,
    paddle.nn.functional.leaky_relu,
    paddle.nn.functional.log_sigmoid,
    paddle.nn.functional.relu,
    paddle.nn.functional.relu6,
    paddle.nn.functional.sigmoid,
    paddle.nn.functional.softplus,
    paddle.nn.functional.softshrink,
    paddle.nn.functional.softsign,
    paddle.nn.functional.swish,
    paddle.nn.functional.tanhshrink,
    paddle.nn.functional.thresholded_relu,
    paddle.stanh,
    paddle.nn.functional.celu,
    paddle.nn.functional.selu,
    paddle.nn.functional.mish,
    paddle.nn.functional.silu,
    paddle.nn.functional.tanh,
    paddle.nn.functional.dropout,
    paddle.cosh,
    paddle.sinh,
    paddle.abs,
    paddle.acos,
    paddle.asin,
    paddle.atan,
    paddle.ceil,
    paddle.cos,
    paddle.exp,
    paddle.floor,
    paddle.log,
    paddle.log1p,
    paddle.reciprocal,
    paddle.round,
    paddle.sin,
    paddle.sqrt,
    paddle.square,
    paddle.tanh,
    paddle.acosh,
    paddle.asinh,
    paddle.atanh,
    paddle.expm1,
    paddle.log10,
    paddle.log2,
    paddle.tan,
    paddle.erf,
    paddle.erfinv,
    paddle.rsqrt,
    paddle.sign,
    paddle.deg2rad,
    paddle.rad2deg,
    paddle.neg,
    paddle.logit,
    paddle.trunc,
    paddle.digamma,
    paddle.lgamma,
    paddle.poisson,
    paddle.bernoulli,
]

inplace_api_list = [
    paddle.nn.functional.relu_,
    paddle.nn.functional.tanh_,
]
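
# Illustrative sketch (ours, not collected by the unittest runner): the
# 0-D tensor contract the unary tests below exercise. The helper name
# `_sketch_zero_dim_unary` is introduced here purely for illustration; it
# assumes a Paddle build with 0-D tensor support, which this suite targets.
def _sketch_zero_dim_unary():
    paddle.disable_static()
    x = paddle.rand([])  # an empty shape creates a 0-D (scalar) tensor
    x.stop_gradient = False
    out = paddle.exp(x)  # unary APIs preserve the 0-D shape
    out.backward()
    assert x.shape == []  # dygraph reports a 0-D shape as []
    assert out.shape == []
    assert x.grad.shape == []  # the gradient is 0-D as well
    paddle.enable_static()
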
# Used to test zero-dim tensors in unary APIs.
class TestUnaryAPI(unittest.TestCase):
    def test_dygraph_unary(self):
        paddle.disable_static()
        for api in unary_api_list:
            x = paddle.rand([])
            x.stop_gradient = False
            out = api(x)
            out.backward()

            self.assertEqual(x.shape, [])
            self.assertEqual(out.shape, [])
            if x.grad is not None:
                self.assertEqual(x.grad.shape, [])
                self.assertEqual(out.grad.shape, [])

        for api in inplace_api_list:
            x = paddle.rand([])
            out = api(x)
            self.assertEqual(x.shape, [])
            self.assertEqual(out.shape, [])

        paddle.enable_static()

    def test_static_unary(self):
        paddle.enable_static()

        for api in unary_api_list:
            main_prog = fluid.Program()
            with fluid.program_guard(main_prog, fluid.Program()):
                x = paddle.rand([])
                x.stop_gradient = False
                out = api(x)
                paddle.static.append_backward(out)

                # Test compile-time shape
                self.assertEqual(x.shape, ())
                self.assertEqual(out.shape, ())

                fetch_list = [x, out]
                # TODO(zhouwei): ScaleLossGradOp / append_backward set grad
                # shape to [1]; will change to [] after the kernel is fixed.
                prog = paddle.static.default_main_program()
                block = prog.global_block()
                if block.has_var(fluid.framework.grad_var_name(x.name)):
                    out_grad = block.var(
                        fluid.framework.grad_var_name(out.name)
                    )
                    fetch_list.append(out_grad)
                    self.assertEqual(out_grad.shape, ())

                # Test runtime shape
                exe = fluid.Executor()
                result = exe.run(main_prog, fetch_list=fetch_list)
                self.assertEqual(result[0].shape, ())
                self.assertEqual(result[1].shape, ())
                if len(result) == 3:
                    # TODO(zhouwei): will change to [] after kernel is fixed
                    self.assertEqual(result[2].shape, (1,))

                # 0-D outputs are stacked when there is more than one place,
                # since they cannot be concatenated:
                # for 1 place:  [ x-place1 ]
                # for 1+ place: [ paddle.stack([x-place1, x-place2, ...]) ]
                if paddle.device.is_compiled_with_cuda():
                    places = [paddle.CUDAPlace(0)]
                    device_num = 1
                    expect_shape = ()
                else:
                    places = [paddle.CPUPlace()] * 4
                    device_num = 4
                    expect_shape = (device_num,)
                compiled_program = fluid.CompiledProgram(
                    main_prog
                ).with_data_parallel(out.name, places=places)
                result = exe.run(
                    compiled_program,
                    fetch_list=fetch_list,
                    return_merged=True,
                )

                # Test runtime parallel shape
                self.assertEqual(result[0].shape, expect_shape)
                self.assertEqual(result[1].shape, expect_shape)
                if len(result) == 3:
                    self.assertEqual(result[2].shape, (device_num,))

                compiled_program = fluid.CompiledProgram(
                    main_prog
                ).with_data_parallel(out.name, places=places)
                result = exe.run(
                    compiled_program,
                    fetch_list=fetch_list,
                    return_merged=False,
                )

                # Unmerged results have the layout
                # [[x-place1, x-place2, ...], [], [], ...]
                self.assertEqual(np.array(result[0]).shape, (device_num,))
                self.assertEqual(np.array(result[1]).shape, (device_num,))
                if len(result) == 3:
                    self.assertEqual(
                        np.array(result[2]).shape, (device_num, 1)
                    )

        paddle.disable_static()
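
# Illustrative sketch (ours, not part of the suite): reducing a 0-D tensor
# over all axes (axis=None) should yield another 0-D tensor, which is the
# contract TestReduceAPI checks for every API in reduce_api_list below.
def _sketch_zero_dim_reduce():
    paddle.disable_static()
    x = paddle.rand([])
    out = paddle.sum(x, None)  # axis=None reduces over all (here: zero) axes
    assert out.shape == []  # the result is still 0-D
    paddle.enable_static()
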
reduce_api_list = [
    paddle.sum,
    paddle.mean,
    paddle.nansum,
    paddle.nanmean,
    paddle.min,
    paddle.max,
    paddle.amin,
    paddle.amax,
    paddle.prod,
    paddle.logsumexp,
    paddle.all,
    paddle.any,
]


# Used to test zero-dim tensors in reduce APIs.
class TestReduceAPI(unittest.TestCase):
    def test_dygraph(self):
        paddle.disable_static()
        for api in reduce_api_list:
            if api in [paddle.all, paddle.any]:
                x = paddle.randint(0, 2, []).astype('bool')
                out = api(x, None)
                self.assertEqual(x.shape, [])
                self.assertEqual(out.shape, [])
            else:
                x = paddle.rand([])
                x.stop_gradient = False
                out = api(x, None)
                out.backward()

                self.assertEqual(x.shape, [])
                self.assertEqual(x.grad.shape, [])
                self.assertEqual(out.shape, [])
                self.assertEqual(out.grad.shape, [])
        paddle.enable_static()

    def test_static(self):
        paddle.enable_static()
        for api in reduce_api_list:
            main_prog = fluid.Program()
            with fluid.program_guard(main_prog, fluid.Program()):
                if api in [paddle.all, paddle.any]:
                    x = paddle.randint(0, 2, []).astype('bool')
                else:
                    x = paddle.rand([])
                x.stop_gradient = False
                out = api(x, None)

                # Test compile-time shape; the grad is always [1]
                self.assertEqual(x.shape, ())
                self.assertEqual(out.shape, ())

                exe = fluid.Executor()
                result = exe.run(main_prog, fetch_list=[x, out])

                # Test runtime shape
                self.assertEqual(result[0].shape, ())
                self.assertEqual(result[1].shape, ())
        paddle.disable_static()
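
# Illustrative sketch (ours): binary_api_list below mixes plain functions
# with dicts pairing a functional API and the Tensor dunder it should match,
# e.g. {'func': paddle.add, 'cls_method': '__add__'}. The tests call both
# forms and compare results; the equivalence being checked looks like this:
def _sketch_func_vs_cls_method():
    paddle.disable_static()
    x = paddle.rand([])
    y = paddle.rand([])
    out_func = paddle.add(x, y)  # functional form
    out_cls = getattr(paddle.Tensor, '__add__')(x, y)  # operator form
    np.testing.assert_array_equal(out_func.numpy(), out_cls.numpy())
    paddle.enable_static()
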
binary_api_list = [
    {'func': paddle.add, 'cls_method': '__add__'},
    {'func': paddle.subtract, 'cls_method': '__sub__'},
    {'func': paddle.multiply, 'cls_method': '__mul__'},
    {'func': paddle.divide, 'cls_method': '__div__'},
    {'func': paddle.pow, 'cls_method': '__pow__'},
    {'func': paddle.equal, 'cls_method': '__eq__'},
    {'func': paddle.not_equal, 'cls_method': '__ne__'},
    {'func': paddle.greater_equal, 'cls_method': '__ge__'},
    {'func': paddle.greater_than, 'cls_method': '__gt__'},
    {'func': paddle.less_equal, 'cls_method': '__le__'},
    {'func': paddle.less_than, 'cls_method': '__lt__'},
    {'func': paddle.remainder, 'cls_method': '__mod__'},
    paddle.mod,
    paddle.floor_mod,
    paddle.logical_and,
    paddle.logical_or,
    paddle.logical_xor,
]

binary_int_api_list = [
    paddle.bitwise_and,
    paddle.bitwise_or,
    paddle.bitwise_xor,
]


# Used to test zero-dim tensors in binary APIs.
class TestBinaryAPI(unittest.TestCase):
    def test_dygraph_binary(self):
        paddle.disable_static()
        for api in binary_api_list:
            # 1) x/y is 0D
            x = paddle.rand([])
            y = paddle.rand([])
            x.stop_gradient = False
            y.stop_gradient = False
            if isinstance(api, dict):
                out = api['func'](x, y)
                out_cls = getattr(paddle.Tensor, api['cls_method'])(x, y)
                np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
            else:
                out = api(x, y)
            self.assertEqual(out.shape, [])

            out.backward()
            if x.grad is not None:
                self.assertEqual(x.grad.shape, [])
                self.assertEqual(y.grad.shape, [])
                self.assertEqual(out.grad.shape, [])

            # 2) x is not 0D, y is 0D
            x = paddle.rand([2, 3, 4])
            y = paddle.rand([])
            x.stop_gradient = False
            y.stop_gradient = False
            if isinstance(api, dict):
                out = api['func'](x, y)
                out_cls = getattr(paddle.Tensor, api['cls_method'])(x, y)
                np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
            else:
                out = api(x, y)
            self.assertEqual(out.shape, [2, 3, 4])

            out.backward()
            if x.grad is not None:
                self.assertEqual(x.grad.shape, [2, 3, 4])
                self.assertEqual(y.grad.shape, [])
                self.assertEqual(out.grad.shape, [2, 3, 4])

            # 3) x is 0D, y is not 0D
            x = paddle.rand([])
            y = paddle.rand([2, 3, 4])
            x.stop_gradient = False
            y.stop_gradient = False
            if isinstance(api, dict):
                out = api['func'](x, y)
                out_cls = getattr(paddle.Tensor, api['cls_method'])(x, y)
                np.testing.assert_array_equal(out_cls.numpy(), out.numpy())
            else:
                out = api(x, y)
            self.assertEqual(out.shape, [2, 3, 4])

            out.backward()
            if x.grad is not None:
                self.assertEqual(x.grad.shape, [])
                self.assertEqual(y.grad.shape, [2, 3, 4])
                self.assertEqual(out.grad.shape, [2, 3, 4])

            # 4) x is 0D, y is a Python scalar
            x = paddle.rand([])
            y = 0.5
            x.stop_gradient = False
            if isinstance(api, dict):
                out = getattr(paddle.Tensor, api['cls_method'])(x, y)
                self.assertEqual(out.shape, [])

        for api in binary_int_api_list:
            # 1) x/y is 0D
            x = paddle.randint(-10, 10, [])
            y = paddle.randint(-10, 10, [])
            out = api(x, y)
            self.assertEqual(out.shape, [])

            # 2) x is not 0D, y is 0D
            x = paddle.randint(-10, 10, [3, 5])
            y = paddle.randint(-10, 10, [])
            out = api(x, y)
            self.assertEqual(out.shape, [3, 5])

            # 3) x is 0D, y is not 0D
            x = paddle.randint(-10, 10, [])
            y = paddle.randint(-10, 10, [3, 5])
            out = api(x, y)
            self.assertEqual(out.shape, [3, 5])

        paddle.enable_static()

    def test_static_binary(self):
        paddle.enable_static()
        for api in binary_api_list:
            main_prog = fluid.Program()
            with fluid.program_guard(main_prog, fluid.Program()):
                # 1) x/y is 0D
                x = paddle.rand([])
                y = paddle.rand([])
                x.stop_gradient = False
                y.stop_gradient = False
                if isinstance(api, dict):
                    out = api['func'](x, y)
                    out_cls = getattr(
                        paddle.static.Variable, api['cls_method']
                    )(x, y)
                    self.assertEqual(out.shape, out_cls.shape)
                else:
                    out = api(x, y)
                paddle.static.append_backward(out)

                self.assertEqual(out.shape, ())
                exe = fluid.Executor()
                result = exe.run(main_prog, fetch_list=[out])
                self.assertEqual(result[0].shape, ())

                # TODO: will enable when create_scalar is []
                # 2) x is 0D, y is a Python scalar
                '''
                x = paddle.rand([])
                y = 0.5
                x.stop_gradient = False
                print(api)
                if isinstance(api, dict):
                    out = getattr(paddle.static.Variable, api['cls_method'])(
                        x, y
                    )
                    self.assertEqual(out.shape, ())
                '''

        for api in binary_int_api_list:
            main_prog = fluid.Program()
            with fluid.program_guard(main_prog, fluid.Program()):
                # 1) x/y is 0D
                x = paddle.randint(-10, 10, [])
                y = paddle.randint(-10, 10, [])
                out = api(x, y)
                self.assertEqual(out.shape, ())

                # 2) x is not 0D, y is 0D
                x = paddle.randint(-10, 10, [3, 5])
                y = paddle.randint(-10, 10, [])
                out = api(x, y)
                self.assertEqual(out.shape, (3, 5))

                # 3) x is 0D, y is not 0D
                x = paddle.randint(-10, 10, [])
                y = paddle.randint(-10, 10, [3, 5])
                out = api(x, y)
                self.assertEqual(out.shape, (3, 5))

        paddle.disable_static()
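
# Illustrative sketch (ours): the broadcasting contract the binary tests
# above rely on. A 0-D operand broadcasts against any shape, and two 0-D
# operands yield a 0-D result.
def _sketch_zero_dim_broadcast():
    paddle.disable_static()
    assert (paddle.rand([]) + paddle.rand([])).shape == []
    assert (paddle.rand([2, 3, 4]) + paddle.rand([])).shape == [2, 3, 4]
    assert (paddle.rand([]) + paddle.rand([2, 3, 4])).shape == [2, 3, 4]
    paddle.enable_static()
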
# Used to test zero-dim tensors in sundry APIs, which are unique and cannot
# be classified with the others. They can be tested here flexibly.
class TestSundryAPI(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
        self.x = paddle.rand([])

    def test_flip(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.flip(x, axis=[])
        out.backward()
        self.assertEqual(x.shape, [])
        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])

    def test_linear(self):
        x = paddle.randn([3, 2])
        w = paddle.full(shape=[2, 4], fill_value=0.5)
        b = paddle.zeros([])

        np.testing.assert_array_equal(
            F.linear(x, w, b).numpy(), F.linear(x, w).numpy()
        )

    def test_is_complex(self):
        x = paddle.rand([]) + 1j * paddle.rand([])
        self.assertTrue(paddle.is_complex(x))

    def test_is_floating_point(self):
        self.assertTrue(paddle.is_floating_point(self.x))

    def test_is_integer(self):
        x = paddle.randint(0, 10, [])
        self.assertTrue(paddle.is_integer(x))

    def test_is_tensor(self):
        self.assertTrue(paddle.is_tensor(self.x))

    def test_is_empty(self):
        x = paddle.rand([3, 0, 5])
        self.assertTrue(paddle.is_empty(x))

    def test_isfinite(self):
        out = paddle.isfinite(self.x)
        np.testing.assert_array_equal(out.numpy(), np.array(True))

    def test_isinf(self):
        x = paddle.to_tensor(np.array(float('-inf')))
        out = paddle.isinf(x)
        np.testing.assert_array_equal(out.numpy(), np.array(True))

    def test_isnan(self):
        x = paddle.to_tensor(np.array(float('nan')))
        out = paddle.isnan(x)
        np.testing.assert_array_equal(out.numpy(), np.array(True))

    def test_isclose(self):
        out = paddle.isclose(self.x, self.x)
        np.testing.assert_array_equal(out.numpy(), np.array(True))

    def test_clone(self):
        out = paddle.clone(self.x)
        np.testing.assert_array_equal(out.numpy(), self.x.numpy())

    def test_assign(self):
        out = paddle.assign(self.x)
        np.testing.assert_array_equal(out.numpy(), self.x.numpy())

    def test_item(self):
        x = paddle.full([], 0.5)
        self.assertEqual(x.item(), 0.5)

    def test_tolist(self):
        x = paddle.full([], 0.5)
        self.assertEqual(x.tolist(), 0.5)

    def test_numpy(self):
        x = paddle.full([], 0.5)
        np.testing.assert_array_equal(x.numpy(), np.array(0.5))

    def test_numel(self):
        out = paddle.numel(self.x)
        self.assertEqual(out.shape, [])
        np.testing.assert_array_equal(out.numpy(), np.array(1))

    def test_rank(self):
        out = paddle.rank(self.x)
        self.assertEqual(out.shape, [])
        np.testing.assert_array_equal(out.numpy(), np.array(0))

    def test_shape(self):
        out = paddle.shape(self.x)
        self.assertEqual(out.shape, [0])
        np.testing.assert_array_equal(out.numpy(), np.array([]))

    def test_pow_factor(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.pow(x, 2.0)
        out.backward()

        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])
        self.assertEqual(x.grad.shape, [])

    def test_cast(self):
        x = paddle.full([], 1.0, 'float32')
        x.stop_gradient = False
        out = paddle.cast(x, 'int32')
        out.backward()

        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])
        self.assertEqual(x.grad.shape, [])

    def test_cumprod(self):
        x = paddle.full([], 1.0, 'float32')
        x.stop_gradient = False
        out = paddle.cumprod(x, 0)
        out.backward()

        with self.assertRaises(ValueError):
            tmp = paddle.cumprod(x, 2)

        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])
        self.assertEqual(x.grad.shape, [])

    def test_clip(self):
        x = paddle.uniform([], None, -10, 10)
        x.stop_gradient = False
        out = paddle.clip(x, -5, 5)
        out.backward()

        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])
        self.assertEqual(x.grad.shape, [])

    def test_increment(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.increment(x, 1.0)
        out.backward()

        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])
        self.assertEqual(x.grad.shape, [])
    def test_bitwise_not(self):
        x = paddle.randint(-1, 1, [])
        out1 = ~x
        out2 = paddle.bitwise_not(x)

        self.assertEqual(out1.shape, [])
        self.assertEqual(out2.shape, [])

    def test_logical_not(self):
        x = paddle.randint(0, 1, [])
        out = paddle.logical_not(x)

        self.assertEqual(out.shape, [])

    def test_searchsorted(self):
        x = paddle.to_tensor([1, 3, 5, 7, 9])
        y = paddle.rand([])

        # searchsorted only has a forward kernel
        out = paddle.searchsorted(x, y)

        self.assertEqual(out.shape, [])
        self.assertEqual(out.numpy(), 0)

    def test_gather_1D(self):
        x = paddle.to_tensor([1.0, 3.0, 5.0, 7.0, 9.0], stop_gradient=False)
        index = paddle.full([], 2, 'int64')
        out = paddle.gather(x, index)
        out.backward()

        self.assertEqual(out.shape, [])
        self.assertEqual(out.numpy(), 5)
        self.assertEqual(out.grad.shape, [])

    def test_gather_xD_axis_0(self):
        x = paddle.to_tensor(
            [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], stop_gradient=False
        )
        index = paddle.full([], 1, 'int64')
        out = paddle.gather(x, index)
        out.backward()

        self.assertEqual(out.shape, [3])
        for i in range(3):
            self.assertEqual(out.numpy()[i], x.numpy()[1][i])
        self.assertEqual(out.grad.shape, [3])

    def test_gather_xD_axis_1(self):
        x = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        index = paddle.full([], 1, 'int64')
        out = paddle.gather(x, index, axis=1)

        self.assertEqual(out.shape, [2])
        for i in range(2):
            self.assertEqual(out.numpy()[i], x.numpy()[i][1])

    def test_scatter_1D(self):
        x = paddle.to_tensor([1.0, 3.0, 5.0, 7.0, 9.0], stop_gradient=False)
        index = paddle.full([], 2, 'int64')
        updates = paddle.full([], 4.0)
        out = paddle.scatter(x, index, updates)
        out.backward()

        self.assertEqual(out.grad.shape, [5])
        self.assertEqual(out.numpy()[2], 4)

    def test_scatter_XD(self):
        x = paddle.to_tensor(
            [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], stop_gradient=False
        )
        index = paddle.full([], 1, 'int64')
        updates = paddle.to_tensor([1.0, 2.0, 3.0])
        out = paddle.scatter(x, index, updates)
        out.backward()

        for i in range(3):
            self.assertEqual(out.numpy()[1][i], updates.numpy()[i])
        self.assertEqual(out.grad.shape, [2, 3])

    def test_diagflat(self):
        x1 = paddle.rand([])
        x2 = paddle.rand([])
        x3 = paddle.rand([])
        x1.stop_gradient = False
        x2.stop_gradient = False
        x3.stop_gradient = False

        out1 = paddle.diagflat(x1, 1)
        out2 = paddle.diagflat(x2, -1)
        out3 = paddle.diagflat(x3, 0)

        out1.backward()
        out2.backward()
        out3.backward()

        self.assertEqual(out1.shape, [2, 2])
        self.assertEqual(out2.shape, [2, 2])
        self.assertEqual(out3.shape, [1, 1])

        self.assertEqual(out1.grad.shape, [2, 2])
        self.assertEqual(out2.grad.shape, [2, 2])
        self.assertEqual(out3.grad.shape, [1, 1])

        self.assertEqual(x1.grad.shape, [])
        self.assertEqual(x2.grad.shape, [])
        self.assertEqual(x3.grad.shape, [])

    def test_scatter__1D(self):
        x = paddle.to_tensor([1.0, 3.0, 5.0, 7.0, 9.0])
        index = paddle.full([], 2, 'int64')
        updates = paddle.full([], 4.0)
        out = paddle.scatter_(x, index, updates)

        self.assertEqual(out.numpy()[2], 4)

    def test_scatter__XD(self):
        x = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        index = paddle.full([], 1, 'int64')
        updates = paddle.to_tensor([1.0, 2.0, 3.0])
        out = paddle.scatter_(x, index, updates)

        for i in range(3):
            self.assertEqual(out.numpy()[1][i], updates.numpy()[i])

    def test_scatter_nd(self):
        index = paddle.to_tensor([3], dtype="int64", stop_gradient=False)
        updates = paddle.full([], 2, dtype='float32')
        updates.stop_gradient = False
        shape = [5]

        out = paddle.scatter_nd(index, updates, shape)
        out.backward()

        self.assertEqual(out.shape, [5])
        self.assertEqual(out.numpy()[3], 2)
        self.assertEqual(out.grad.shape, [5])

    def test_kthvalue(self):
        places = ['cpu']
        if paddle.is_compiled_with_cuda():
            places.append('gpu')

        for place in places:
            paddle.set_device(place)

            x = paddle.randn(())
            x.stop_gradient = False
            out = paddle.kthvalue(x, 1)
            out[0].backward()

            # check shapes of the output value and indices
            self.assertEqual(out[0].shape, [])
            self.assertEqual(out[1].shape, [])

            # check grad shape and value
            self.assertEqual(x.grad.shape, [])
            self.assertTrue(x.grad.numpy() == 1)

    def test_mode(self):
        places = ['cpu']
        if paddle.is_compiled_with_cuda():
            places.append('gpu')

        for place in places:
            paddle.set_device(place)

            x = paddle.randn(())
            x.stop_gradient = False
            out = paddle.mode(x)
            out[0].backward()

            # check shapes of the output value and indices
            self.assertEqual(out[0].shape, [])
            self.assertEqual(out[1].shape, [])

            # check grad shape and value
            self.assertEqual(x.grad.shape, [])
            self.assertTrue(x.grad.numpy() == 1)

    def test_flatten(self):
        x = paddle.rand([])
        x.stop_gradient = False

        start_axis = 0
        stop_axis = -1

        out = paddle.flatten(x, start_axis=start_axis, stop_axis=stop_axis)
        out.backward()

        self.assertEqual(out.shape, [1])
        self.assertEqual(x.grad.shape, [])

    def test_scale(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.scale(x, scale=2.0, bias=1.0)
        out.backward()

        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])
        self.assertEqual(x.grad.shape, [])

    def test_floor_divide(self):
        # 1-d // 0-d
        x = paddle.to_tensor([1, -2, 3], dtype="int64")
        y = paddle.full([], 2, dtype='int64')
        out1_1 = paddle.floor_divide(x, y)
        out1_2 = paddle.Tensor.__floordiv__(x, y)

        np.testing.assert_array_equal(out1_1.numpy(), out1_2.numpy())
        np.testing.assert_array_equal(out1_1.numpy(), np.asarray([0, -1, 1]))

        # 0-d // 1-d
        out2_1 = paddle.floor_divide(y, x)
        out2_2 = paddle.Tensor.__floordiv__(y, x)

        np.testing.assert_array_equal(out2_1.numpy(), out2_2.numpy())
        np.testing.assert_array_equal(out2_2.numpy(), np.asarray([2, -1, 0]))

        # 0-d // 0-d
        x = paddle.full([], 3, dtype='int64')
        out3_1 = paddle.floor_divide(x, y)
        out3_2 = paddle.Tensor.__floordiv__(x, y)

        np.testing.assert_array_equal(out3_1.numpy(), out3_2.numpy())
        np.testing.assert_array_equal(out3_2.numpy(), np.asarray(1))

    def test_reshape_list(self):
        x = paddle.rand([])
        x.stop_gradient = False

        out = paddle.reshape(x, [])
        out.backward()
        self.assertEqual(x.grad.shape, [])
        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])

        out = paddle.reshape(x, [1])
        out.backward()
        self.assertEqual(x.grad.shape, [])
        self.assertEqual(out.shape, [1])
        self.assertEqual(out.grad.shape, [1])

        out = paddle.reshape(x, [-1])
        out.backward()
        self.assertEqual(x.grad.shape, [])
        self.assertEqual(out.shape, [1])
        self.assertEqual(out.grad.shape, [1])

        out = paddle.reshape(x, [-1, 1])
        out.backward()
        self.assertEqual(x.grad.shape, [])
        self.assertEqual(out.shape, [1, 1])
        self.assertEqual(out.grad.shape, [1, 1])
    def test_reshape_tensor(self):
        x = paddle.rand([1, 1])
        x.stop_gradient = False

        out = paddle.reshape(x, [])
        out.backward()
        self.assertEqual(x.grad.shape, [1, 1])
        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])

        new_shape = paddle.full([1], 1, "int32")
        out = paddle.reshape(x, new_shape)
        out.backward()
        self.assertEqual(x.grad.shape, [1, 1])
        self.assertEqual(out.shape, [1])
        self.assertEqual(out.grad.shape, [1])

        new_shape = paddle.full([1], -1, "int32")
        out = paddle.reshape(x, new_shape)
        out.backward()
        self.assertEqual(x.grad.shape, [1, 1])
        self.assertEqual(out.shape, [1])
        self.assertEqual(out.grad.shape, [1])

        new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")]
        out = paddle.reshape(x, new_shape)
        out.backward()
        self.assertEqual(x.grad.shape, [1, 1])
        self.assertEqual(out.shape, [1, 1])
        self.assertEqual(out.grad.shape, [1, 1])

    def test_reshape__list(self):
        x = paddle.rand([])
        out = paddle.reshape_(x, [])
        self.assertEqual(out.shape, [])

        out = paddle.reshape_(x, [1])
        self.assertEqual(out.shape, [1])

        out = paddle.reshape_(x, [-1])
        self.assertEqual(out.shape, [1])

        out = paddle.reshape_(x, [-1, 1])
        self.assertEqual(out.shape, [1, 1])

    def test_reshape__tensor(self):
        x = paddle.rand([1, 1])
        out = paddle.reshape_(x, [])
        self.assertEqual(out.shape, [])

        new_shape = paddle.full([1], 1, "int32")
        out = paddle.reshape_(x, new_shape)
        self.assertEqual(out.shape, [1])

        new_shape = paddle.full([1], -1, "int32")
        out = paddle.reshape_(x, new_shape)
        self.assertEqual(out.shape, [1])

        new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")]
        out = paddle.reshape_(x, new_shape)
        self.assertEqual(out.shape, [1, 1])

    def test_reverse(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.reverse(x, axis=[])
        out.backward()
        self.assertEqual(x.shape, [])
        self.assertEqual(out.shape, [])
        self.assertEqual(out.grad.shape, [])

    def test_sort(self):
        x1 = paddle.rand([])
        x2 = paddle.rand([])
        x1.stop_gradient = False
        x2.stop_gradient = False
        out1 = paddle.sort(x1, axis=-1)
        out2 = paddle.sort(x2, axis=0)

        out1.backward()
        out2.backward()

        self.assertEqual(out1.shape, [])
        self.assertEqual(out2.shape, [])
        self.assertEqual(out1.numpy(), x1.numpy())
        self.assertEqual(out2.numpy(), x2.numpy())
        self.assertEqual(out1.grad.shape, [])
        self.assertEqual(out2.grad.shape, [])
        self.assertEqual(x1.grad.shape, [])
        self.assertEqual(x2.grad.shape, [])
        self.assertEqual(x1.grad.numpy(), 1)
        self.assertEqual(x2.grad.numpy(), 1)

    def test_argsort(self):
        x1 = paddle.rand([])
        x2 = paddle.rand([])
        x1.stop_gradient = False
        x2.stop_gradient = False
        out1 = paddle.argsort(x1, axis=-1)
        out2 = paddle.argsort(x2, axis=0)

        out1.backward()
        out2.backward()

        self.assertEqual(out1.shape, [])
        self.assertEqual(out2.shape, [])
        self.assertEqual(out1.numpy(), 0)
        self.assertEqual(out2.numpy(), 0)
        self.assertEqual(out1.grad.shape, [])
        self.assertEqual(out2.grad.shape, [])
        self.assertEqual(x1.grad.shape, [])
        self.assertEqual(x2.grad.shape, [])
        self.assertEqual(x1.grad.numpy(), 0)
        self.assertEqual(x2.grad.numpy(), 0)


class TestSundryAPIStatic(unittest.TestCase):
    def setUp(self):
        paddle.enable_static()
        self.exe = paddle.static.Executor()

    @prog_scope()
    def test_flip(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.flip(x, axis=[])
        paddle.static.append_backward(out)

        program = paddle.static.default_main_program()
        res1, res2 = self.exe.run(program, fetch_list=[x, out])
        self.assertEqual(res1.shape, ())
        self.assertEqual(res2.shape, ())

    @prog_scope()
    def test_pow_factor(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.pow(x, 2.0)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_cast(self):
        x = paddle.full([], 1.0, 'float32')
        x.stop_gradient = False
        out = paddle.cast(x, 'int32')
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_cumprod(self):
        x = paddle.full([], 1.0, 'float32')
        x.stop_gradient = False
        out = paddle.cumprod(x, 0)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])

        with self.assertRaises(ValueError):
            tmp = paddle.cumprod(x, 2)
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_clip(self):
        x = paddle.uniform([], None, -10, 10)
        x.stop_gradient = False
        out = paddle.clip(x, -5, 5)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_increment(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.increment(x, 1.0)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_bitwise_not(self):
        x = paddle.randint(-1, 1, [])
        out = paddle.bitwise_not(x)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_logical_not(self):
        x = paddle.randint(0, 1, [])
        out = paddle.logical_not(x)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_searchsorted(self):
        x = paddle.full([10], 1.0, 'float32')
        y = paddle.full([], 1.0, 'float32')
        out = paddle.searchsorted(x, y)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[0], 0)

    @prog_scope()
    def test_gather_1D(self):
        x = paddle.full([10], 1.0, 'float32')
        index = paddle.full([], 2, 'int64')
        out = paddle.gather(x, index)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[0], 1)

    @prog_scope()
    def test_gather_XD_axis_0(self):
        x = paddle.full([2, 3], 1.0, 'float32')
        index = paddle.full([], 1, 'int64')
        out = paddle.gather(x, index)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, (3,))
        for i in range(3):
            self.assertEqual(res[0][i], 1)

    @prog_scope()
    def test_gather_XD_axis_1(self):
        x = paddle.full([2, 3], 1.0, 'float32')
        index = paddle.full([], 1, 'int64')
        out = paddle.gather(x, index, axis=1)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, (2,))
        for i in range(2):
            self.assertEqual(res[0][i], 1)

    @prog_scope()
    def test_scatter_1D(self):
        x = paddle.full([10], 1.0, 'float32')
        index = paddle.full([], 2, 'int64')
        updates = paddle.full([], 4, 'float32')
        out = paddle.scatter(x, index, updates)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0][2], 4)

    @prog_scope()
    def test_scatter_XD(self):
        x = paddle.full([2, 3], 1.0, 'float32')
        index = paddle.full([], 1, 'int64')
        updates = paddle.full([3], 4, 'float32')
        out = paddle.scatter(x, index, updates)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        for i in range(3):
            self.assertEqual(res[0][1][i], 4)

    @prog_scope()
    def test_diagflat(self):
        x1 = paddle.rand([])
        out1 = paddle.diagflat(x1, 1)
        paddle.static.append_backward(out1)

        x2 = paddle.rand([])
        out2 = paddle.diagflat(x2, -1)
        paddle.static.append_backward(out2)

        x3 = paddle.rand([])
        out3 = paddle.diagflat(x3)
        paddle.static.append_backward(out3)

        prog = paddle.static.default_main_program()
        res1, res2, res3 = self.exe.run(prog, fetch_list=[out1, out2, out3])

        self.assertEqual(res1.shape, (2, 2))
        self.assertEqual(res2.shape, (2, 2))
        self.assertEqual(res3.shape, (1, 1))

    @prog_scope()
    def test_scatter__1D(self):
        x = paddle.full([10], 1.0, 'float32')
        index = paddle.full([], 2, 'int64')
        updates = paddle.full([], 4, 'float32')
        out = paddle.scatter_(x, index, updates)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0][2], 4)

    @prog_scope()
    def test_scatter__XD(self):
        x = paddle.full([2, 3], 1.0, 'float32')
        index = paddle.full([], 1, 'int64')
        updates = paddle.full([3], 4, 'float32')
        out = paddle.scatter_(x, index, updates)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        for i in range(3):
            self.assertEqual(res[0][1][i], 4)

    @prog_scope()
    def test_scatter_nd(self):
        index = paddle.static.data(name='index', shape=[1], dtype='int64')
        updates = paddle.full([], 2, 'float32')
        shape = [5]
        index_data = np.array([3], dtype=np.longlong)
        out = paddle.scatter_nd(index, updates, shape)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, feed={'index': index_data}, fetch_list=[out])
        self.assertEqual(res[0].shape, (5,))
        self.assertEqual(res[0][3], 2)

    @prog_scope()
    def test_kthvalue(self):
        x = paddle.full([], 1, 'float32')
        out = paddle.kthvalue(x, 1)
        paddle.static.append_backward(out[0])

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        # both the value and the indices are 0-D
        self.assertEqual(len(res[0].shape), 0)
        self.assertEqual(len(res[1].shape), 0)

    @prog_scope()
    def test_mode(self):
        x = paddle.full([], 1, 'float32')
        out = paddle.mode(x)
        paddle.static.append_backward(out[0])

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        # both the value and the indices are 0-D
        self.assertEqual(len(res[0].shape), 0)
        self.assertEqual(len(res[1].shape), 0)

    @prog_scope()
    def test_flatten(self):
        x = paddle.full([], 1, 'float32')
        x.stop_gradient = False

        start_axis = 0
        stop_axis = -1

        out = paddle.flatten(x, start_axis=start_axis, stop_axis=stop_axis)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, feed={}, fetch_list=[out])

        self.assertEqual(res[0].shape, (1,))

    @prog_scope()
    def test_scale(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.scale(x, scale=2.0, bias=1.0)
        paddle.static.append_backward(out)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out])
        self.assertEqual(res[0].shape, ())

    @prog_scope()
    def test_floor_divide(self):
        # 1-d // 0-d
        x = paddle.to_tensor([1, -2, 3], dtype="int64")
        y = paddle.full([], 2, dtype='int64')
        out1_1 = paddle.floor_divide(x, y)
        out1_2 = x // y

        # 0-d // 1-d
        out2_1 = paddle.floor_divide(y, x)
        out2_2 = y // x

        # 0-d // 0-d
        x = paddle.full([], 3, dtype='int64')
        out3_1 = paddle.floor_divide(x, y)
        out3_2 = x // y

        prog = paddle.static.default_main_program()
        res = self.exe.run(
            prog, fetch_list=[out1_1, out1_2, out2_1, out2_2, out3_1, out3_2]
        )
        out1_1, out1_2, out2_1, out2_2, out3_1, out3_2 = res

        np.testing.assert_array_equal(out1_1, out1_2)
        np.testing.assert_array_equal(out1_1, np.asarray([0, -1, 1]))
        np.testing.assert_array_equal(out2_1, out2_2)
        np.testing.assert_array_equal(out2_2, np.asarray([2, -1, 0]))
        np.testing.assert_array_equal(out3_1, out3_2)
        np.testing.assert_array_equal(out3_2, np.asarray(1))
    @prog_scope()
    def test_reshape_list(self):
        x1 = paddle.rand([])
        x2 = paddle.rand([])
        x3 = paddle.rand([])
        x4 = paddle.rand([])
        x1.stop_gradient = False
        x2.stop_gradient = False
        x3.stop_gradient = False
        x4.stop_gradient = False

        out1 = paddle.reshape(x1, [])
        paddle.static.append_backward(out1)

        out2 = paddle.reshape(x2, [1])
        paddle.static.append_backward(out2)

        out3 = paddle.reshape(x3, [-1])
        paddle.static.append_backward(out3)

        out4 = paddle.reshape(x4, [-1, 1])
        paddle.static.append_backward(out4)

        program = paddle.static.default_main_program()
        res1, res2, res3, res4 = self.exe.run(
            program, fetch_list=[out1, out2, out3, out4]
        )
        self.assertEqual(res1.shape, ())
        self.assertEqual(res2.shape, (1,))
        self.assertEqual(res3.shape, (1,))
        self.assertEqual(res4.shape, (1, 1))

    @prog_scope()
    def test_reshape_tensor(self):
        x1 = paddle.rand([])
        x2 = paddle.rand([])
        x3 = paddle.rand([])
        x1.stop_gradient = False
        x2.stop_gradient = False
        x3.stop_gradient = False

        new_shape = paddle.full([1], 1, "int32")
        out1 = paddle.reshape(x1, new_shape)
        paddle.static.append_backward(out1)

        new_shape = paddle.full([1], -1, "int32")
        out2 = paddle.reshape(x2, new_shape)
        paddle.static.append_backward(out2)

        new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")]
        out3 = paddle.reshape(x3, new_shape)
        paddle.static.append_backward(out3)

        program = paddle.static.default_main_program()
        res1, res2, res3 = self.exe.run(program, fetch_list=[out1, out2, out3])
        self.assertEqual(res1.shape, (1,))
        self.assertEqual(res2.shape, (1,))
        self.assertEqual(res3.shape, (1, 1))

    @prog_scope()
    def test_reverse(self):
        x = paddle.rand([])
        x.stop_gradient = False
        out = paddle.reverse(x, axis=[])
        paddle.static.append_backward(out)

        program = paddle.static.default_main_program()
        res1, res2 = self.exe.run(program, fetch_list=[x, out])
        self.assertEqual(res1.shape, ())
        self.assertEqual(res2.shape, ())

    @prog_scope()
    def test_sort(self):
        x1 = paddle.rand([])
        x1.stop_gradient = False
        out1 = paddle.sort(x1, axis=-1)
        paddle.static.append_backward(out1)

        x2 = paddle.rand([])
        x2.stop_gradient = False
        out2 = paddle.sort(x2, axis=0)
        paddle.static.append_backward(out2)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out1, out2])

        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, ())

    @prog_scope()
    def test_argsort(self):
        x1 = paddle.rand([])
        x1.stop_gradient = False
        out1 = paddle.argsort(x1, axis=-1)
        paddle.static.append_backward(out1)

        x2 = paddle.rand([])
        x2.stop_gradient = False
        out2 = paddle.argsort(x2, axis=0)
        paddle.static.append_backward(out2)

        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[out1, out2])

        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, ())
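
# Illustrative sketch (ours): in static graph mode the compile-time shape of
# a 0-D variable is the empty tuple () rather than the dygraph [], and the
# fetched numpy value has shape () as well. This is the convention asserted
# throughout the static test classes.
def _sketch_zero_dim_static():
    paddle.enable_static()
    main_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog):
        x = paddle.rand([])
        assert x.shape == ()  # compile-time shape is a tuple
        exe = paddle.static.Executor()
        (res,) = exe.run(main_prog, fetch_list=[x])
        assert res.shape == ()  # runtime numpy shape matches
    paddle.disable_static()
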
# Used to test APIs whose zero-dim input tensors have no grad and do not
# need backward testing in OpTest.
class TestNoBackwardAPI(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
        self.shape = [
            paddle.full([], 2, 'int32'),
            paddle.full([], 3, 'int32'),
            paddle.full([], 4, 'int32'),
        ]

    def test_slice(self):
        starts = [paddle.full([], 1, 'int32'), paddle.full([], 1, 'int32')]
        ends = [paddle.full([], 3, 'int32'), paddle.full([], 3, 'int32')]
        x = paddle.rand([5, 3, 3])
        out = paddle.slice(x, [1, 2], starts, ends)
        self.assertEqual(out.shape, [5, 2, 2])

    def test_strided_slice(self):
        starts = [paddle.full([], 0, 'int32'), paddle.full([], 0, 'int32')]
        ends = [paddle.full([], 4, 'int32'), paddle.full([], 4, 'int32')]
        strides = [paddle.full([], 2, 'int32'), paddle.full([], 2, 'int32')]
        x = paddle.rand([5, 5, 5])
        out = paddle.strided_slice(x, [1, 2], starts, ends, strides)
        self.assertEqual(out.shape, [5, 2, 2])

    def test_linspace(self):
        start = paddle.full([], 1.0)
        stop = paddle.full([], 5.0)
        num = paddle.full([], 5, 'int32')
        out = paddle.linspace(start, stop, num)
        np.testing.assert_array_equal(out.numpy(), [1.0, 2.0, 3.0, 4.0, 5.0])

    def test_arange(self):
        start = paddle.full([], 1.0)
        stop = paddle.full([], 6.0)
        step = paddle.full([], 1.0)
        out = paddle.arange(start, stop, step)
        np.testing.assert_array_equal(out.numpy(), [1.0, 2.0, 3.0, 4.0, 5.0])

    def test_normal(self):
        mean = paddle.full([], 0.0)
        std = paddle.full([], 0.0)
        out = paddle.normal(mean, std)
        self.assertEqual(out.shape, [])

        out = paddle.normal(0.0, 1.0, [])
        self.assertEqual(out.shape, [])

        out = paddle.normal(0.0, 1.0, self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_rand(self):
        out = paddle.rand([])
        self.assertEqual(out.shape, [])

        out = paddle.rand(self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_randn(self):
        out = paddle.randn([])
        self.assertEqual(out.shape, [])

        out = paddle.randn(self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_randint_and_randint_like(self):
        out = paddle.randint(-10, 10, [])
        self.assertEqual(out.shape, [])

        out = paddle.randint_like(out, -10, 10)
        self.assertEqual(out.shape, [])

        out = paddle.randint(-10, 10, self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_standard_normal(self):
        out = paddle.standard_normal([])
        self.assertEqual(out.shape, [])

        out = paddle.standard_normal(self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_uniform(self):
        out = paddle.uniform([])
        self.assertEqual(out.shape, [])

        out = paddle.uniform(self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_empty_and_empty_like(self):
        out = paddle.empty([])
        self.assertEqual(out.shape, [])

        out = paddle.empty_like(out)
        self.assertEqual(out.shape, [])

        out = paddle.empty(self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_full_and_full_like(self):
        out = paddle.full([], 0.5)
        self.assertEqual(out.shape, [])

        out = paddle.full_like(out, 0.5)
        self.assertEqual(out.shape, [])

        out = paddle.full(self.shape, 0.5)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_ones_and_ones_like(self):
        out = paddle.ones([])
        self.assertEqual(out.shape, [])

        out = paddle.ones_like(out)
        self.assertEqual(out.shape, [])

        out = paddle.ones(self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_zeros_and_zeros_like(self):
        out = paddle.zeros([])
        self.assertEqual(out.shape, [])

        out = paddle.zeros_like(out)
        self.assertEqual(out.shape, [])

        out = paddle.zeros(self.shape)
        self.assertEqual(out.shape, [2, 3, 4])

    def test_embedding(self):
        ids = paddle.full(shape=[], fill_value=1, dtype='int64')
        w0 = paddle.arange(3, 9).reshape((3, 2)).astype(paddle.float32)
        w = paddle.to_tensor(w0, stop_gradient=False)
        emb = paddle.nn.functional.embedding(
            x=ids, weight=w, sparse=True, name="embedding"
        )
        self.assertEqual(emb.shape, [2])
        res = [5.0, 6.0]
        for i in range(len(res)):
            self.assertEqual(emb.numpy()[i], res[i])

    def test_one_hot_label(self):
        label = paddle.full(shape=[], fill_value=2, dtype='int64')
        one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
        self.assertEqual(one_hot_label.shape, [4])
        self.assertEqual(one_hot_label.numpy()[2], 1)
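
# Illustrative sketch (ours): both NoBackward test classes pass a shape
# given as a list of 0-D int tensors (see self.shape in setUp); each 0-D
# element supplies one dimension of the created tensor.
def _sketch_tensor_shape_args():
    paddle.disable_static()
    dims = [paddle.full([], 2, 'int32'), paddle.full([], 3, 'int32')]
    out = paddle.rand(dims)  # equivalent to paddle.rand([2, 3])
    assert out.shape == [2, 3]
    paddle.enable_static()
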
class TestNoBackwardAPIStatic(unittest.TestCase):
    def setUp(self):
        paddle.enable_static()
        self.exe = paddle.static.Executor()
        self.shape = [
            paddle.full([], 2, 'int32'),
            paddle.full([], 3, 'int32'),
            paddle.full([], 4, 'int32'),
        ]

    def test_slice(self):
        starts = [paddle.full([], 1, 'int32'), paddle.full([], 1, 'int32')]
        ends = [paddle.full([], 3, 'int32'), paddle.full([], 3, 'int32')]
        x = paddle.rand([5, 3, 3])
        out = paddle.slice(x, [1, 2], starts, ends)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out]
        )[0]
        self.assertEqual(res.shape, (5, 2, 2))

    def test_strided_slice(self):
        starts = [paddle.full([], 0, 'int32'), paddle.full([], 0, 'int32')]
        ends = [paddle.full([], 4, 'int32'), paddle.full([], 4, 'int32')]
        strides = [paddle.full([], 2, 'int32'), paddle.full([], 2, 'int32')]
        x = paddle.rand([5, 5, 5])
        out = paddle.strided_slice(x, [1, 2], starts, ends, strides)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out]
        )[0]
        self.assertEqual(res.shape, (5, 2, 2))

    def test_linspace(self):
        start = paddle.full([], 1.0)
        stop = paddle.full([], 5.0)
        num = paddle.full([], 5, 'int32')
        out = paddle.linspace(start, stop, num)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out]
        )[0]
        np.testing.assert_array_equal(res, [1.0, 2.0, 3.0, 4.0, 5.0])

    def test_arange(self):
        start = paddle.full([], 1.0)
        stop = paddle.full([], 6.0)
        step = paddle.full([], 1.0)
        out = paddle.arange(start, stop, step)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out]
        )[0]
        np.testing.assert_array_equal(res, [1.0, 2.0, 3.0, 4.0, 5.0])

    def test_normal(self):
        mean = paddle.full([], 0.0)
        std = paddle.full([], 0.0)
        out1 = paddle.normal(mean, std)
        out2 = paddle.normal(0.0, 1.0, [])
        out3 = paddle.normal(0.0, 1.0, self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2, out3]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, ())
        self.assertEqual(res[2].shape, (2, 3, 4))

    def test_rand(self):
        out1 = paddle.rand([])
        out2 = paddle.rand(self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, (2, 3, 4))

    def test_randn(self):
        out1 = paddle.randn([])
        out2 = paddle.randn(self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, (2, 3, 4))

    def test_randint_and_randint_like(self):
        out1 = paddle.randint(-10, 10, [])
        out2 = paddle.randint_like(out1, -10, 10)
        out3 = paddle.randint(-10, 10, self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2, out3]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, ())
        self.assertEqual(res[2].shape, (2, 3, 4))

    def test_standard_normal(self):
        out1 = paddle.standard_normal([])
        out2 = paddle.standard_normal(self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, (2, 3, 4))

    def test_uniform(self):
        out1 = paddle.uniform([])
        out2 = paddle.uniform(self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, (2, 3, 4))

    def test_empty_and_empty_like(self):
        out1 = paddle.empty([])
        out2 = paddle.empty_like(out1)
        out3 = paddle.empty(self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2, out3]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, ())
        self.assertEqual(res[2].shape, (2, 3, 4))

    def test_full_and_full_like(self):
        out1 = paddle.full([], 0.5)
        out2 = paddle.full_like(out1, 0.5)
        out3 = paddle.full(self.shape, 0.5)
        out4 = paddle.full(self.shape, paddle.full([], 0.5))
        res = self.exe.run(
            paddle.static.default_main_program(),
            fetch_list=[out1, out2, out3, out4],
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, ())
        self.assertEqual(res[2].shape, (2, 3, 4))
        self.assertEqual(res[3].shape, (2, 3, 4))

    def test_ones_and_ones_like(self):
        out1 = paddle.ones([])
        out2 = paddle.ones_like(out1)
        out3 = paddle.ones(self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2, out3]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, ())
        self.assertEqual(res[2].shape, (2, 3, 4))

    def test_zeros_and_zeros_like(self):
        out1 = paddle.zeros([])
        out2 = paddle.zeros_like(out1)
        out3 = paddle.zeros(self.shape)
        res = self.exe.run(
            paddle.static.default_main_program(), fetch_list=[out1, out2, out3]
        )
        self.assertEqual(res[0].shape, ())
        self.assertEqual(res[1].shape, ())
        self.assertEqual(res[2].shape, (2, 3, 4))

    def test_embedding(self):
        ids = paddle.full(shape=[], fill_value=1, dtype='int64')
        w0 = paddle.arange(3, 9).reshape((3, 2)).astype(paddle.float32)
        w = paddle.to_tensor(w0, stop_gradient=False)
        emb = paddle.nn.functional.embedding(
            x=ids, weight=w, sparse=True, name="embedding"
        )
        prog = paddle.static.default_main_program()
        res = self.exe.run(prog, fetch_list=[emb])
        self.assertEqual(res[0].shape, (2,))
        result = [5.0, 6.0]
        for i in range(len(result)):
            self.assertEqual(res[0][i], result[i])

    def test_static_embedding(self):
        ids = paddle.full(shape=[], fill_value=1, dtype='int64')
        emb = paddle.static.nn.embedding(ids, (20, 3))
        prog = paddle.static.default_main_program()
        self.exe.run(paddle.fluid.default_startup_program())
        res = self.exe.run(prog, fetch_list=[emb])
        self.assertEqual(res[0].shape, (3,))

    def test_one_hot_label(self):
        label = paddle.full(shape=[], fill_value=2, dtype='int64')
        one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
        prog = paddle.static.default_main_program()
        self.exe.run(paddle.fluid.default_startup_program())
        res = self.exe.run(prog, fetch_list=[one_hot_label])

        self.assertEqual(res[0].shape, (4,))
        self.assertEqual(res[0][2], 1)


if __name__ == "__main__":
    unittest.main()