diff --git a/python/paddle/distribution/transform.py b/python/paddle/distribution/transform.py
index db55eca2d1a71c837ec3e94bf4eb4145d693c96e..40cd4b6627b387b36334f964577d200495244d2a 100644
--- a/python/paddle/distribution/transform.py
+++ b/python/paddle/distribution/transform.py
@@ -13,9 +13,7 @@
 # limitations under the License.
 
 import enum
-import functools
 import math
-import operator
 import typing
 
 import paddle
@@ -401,7 +399,7 @@ class AbsTransform(Transform):
         return -y, y
 
     def _inverse_log_det_jacobian(self, y):
-        zero = paddle.zeros([1], dtype=y.dtype)
+        zero = paddle.zeros([], dtype=y.dtype)
         return zero, zero
 
     @property
@@ -872,12 +870,16 @@ class ReshapeTransform(Transform):
                 f"Squence[int], but got 'in_event_shape': {in_event_shape}, "
                 f"'out_event_shape': {out_event_shape}"
             )
-        if functools.reduce(operator.mul, in_event_shape) != functools.reduce(
-            operator.mul, out_event_shape
-        ):
+        in_size = 1
+        for e in in_event_shape:
+            in_size *= e
+        out_size = 1
+        for e in out_event_shape:
+            out_size *= e
+        if in_size != out_size:
             raise ValueError(
                 f"The numel of 'in_event_shape' should be 'out_event_shape', "
-                f"but got {functools.reduce(operator.mul, in_event_shape)}!={functools.reduce(operator.mul, out_event_shape)}"
+                f"but got {in_size}!={out_size}"
             )
 
         self._in_event_shape = tuple(in_event_shape)
@@ -917,7 +919,9 @@ class ReshapeTransform(Transform):
             raise ValueError(
                 f"Expected length of 'shape' is not less than {len(self._in_event_shape)}, but got {len(shape)}"
             )
-        if shape[-len(self._in_event_shape) :] != self._in_event_shape:
+        if tuple(shape[-len(self._in_event_shape) :]) != tuple(
+            self._in_event_shape
+        ):
             raise ValueError(
                 f"Event shape mismatch, expected: {self._in_event_shape}, but got {shape[-len(self._in_event_shape):]}"
             )
@@ -930,7 +934,9 @@ class ReshapeTransform(Transform):
             raise ValueError(
                 f"Expected 'shape' length is not less than {len(self._out_event_shape)}, but got {len(shape)}"
             )
-        if shape[-len(self._out_event_shape) :] != self._out_event_shape:
+        if tuple(shape[-len(self._out_event_shape) :]) != tuple(
+            self._out_event_shape
+        ):
             raise ValueError(
                 f"Event shape mismatch, expected: {self._out_event_shape}, but got {shape[-len(self._out_event_shape):]}"
             )
@@ -939,7 +945,7 @@ class ReshapeTransform(Transform):
         )
 
     def _forward_log_det_jacobian(self, x):
-        # paddle.zeros not support zero dimension Tensor.
+        # TODO(zhouwei): should not set shape to [1] here; it should be []
         shape = x.shape[: x.dim() - len(self._in_event_shape)] or [1]
         return paddle.zeros(shape, dtype=x.dtype)
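Two things are worth noting about the transform.py changes. First, `paddle.zeros([])` creates a true zero-dimensional (scalar) tensor, so `AbsTransform._inverse_log_det_jacobian` no longer promotes the result for a 0-D input to shape `[1]`. Second, `functools.reduce(operator.mul, seq)` raises `TypeError` when `seq` is empty and no initializer is given, whereas the explicit loops return the empty product 1; the loop form therefore accepts a zero-dim event shape such as `()`, which the new `ReshapeTransform((), (1, 1))` test below relies on. A minimal standalone sketch of that check (the helper name is illustrative, not part of the patch):

    # Illustrative helper (hypothetical name), mirroring the numel check above.
    def shapes_have_equal_numel(in_event_shape, out_event_shape):
        in_size = 1
        for e in in_event_shape:   # an empty shape () leaves the product at 1
            in_size *= e
        out_size = 1
        for e in out_event_shape:
            out_size *= e
        return in_size == out_size

    assert shapes_have_equal_numel((2, 3), (6,))
    assert shapes_have_equal_numel((), (1, 1))  # zero-dim event shape accepted
    # functools.reduce(operator.mul, ()) would raise TypeError on the same input.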
diff --git a/python/paddle/fluid/tests/unittests/distribution/parameterize.py b/python/paddle/fluid/tests/unittests/distribution/parameterize.py
index 9c3341e34ce72038c4c076425c2c11b080046a5b..f962efc7139997679329b2759f06ddc1ecd0735c 100644
--- a/python/paddle/fluid/tests/unittests/distribution/parameterize.py
+++ b/python/paddle/fluid/tests/unittests/distribution/parameterize.py
@@ -103,7 +103,7 @@ def parameterize_func(
         frame_locals[name].__doc__ = doc_func(f, num, p)
 
     # Delete original patches to prevent new function from evaluating
-    # original patching object as well as re-constrfucted patches.
+    # original patching object as well as re-constructed patches.
     delete_patches_if_need(f)
 
     f.__test__ = False
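The test changes below pin down the zero-dimensional behavior transform by transform. For context, a short sketch of the 0-D versus 1-D distinction the patch is about, assuming a Paddle build with zero-dim tensor support:

    import paddle

    zero_0d = paddle.zeros([])    # shape []: a 0-D (scalar) tensor
    zero_1d = paddle.zeros([1])   # shape [1]: a 1-D tensor with one element

    assert zero_0d.shape == []
    assert zero_1d.shape == [1]

    # Broadcasting against a 0-D input keeps the result 0-D, which is what the
    # AbsTransform change relies on; the old [1]-shaped zero silently promoted
    # 0-D results to rank 1.
    y = paddle.to_tensor(2.0)     # 0-D
    assert (y + zero_0d).shape == []
    assert (y + zero_1d).shape == [1]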
diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform.py
index f3f89068b351097aab49c980ab85fd0804431e85..478a449b973e78c0999a8117721bd45b838e421d 100644
--- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform.py
+++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_transform.py
@@ -191,6 +191,17 @@ class TestAbsTransform(unittest.TestCase):
     def test_inverse_shape(self, shape, expected_shape):
         self.assertEqual(self._t.forward_shape(shape), expected_shape)
 
+    @param.param_func([(np.array(1.0), np.array(1.0))])
+    def test_zerodim(self, input, expected):
+        x = paddle.to_tensor(input).astype('float32')
+        self.assertEqual(self._t.forward(x).shape, [])
+        self.assertEqual(self._t.inverse(x)[0].shape, [])
+        self.assertEqual(self._t.inverse(x)[1].shape, [])
+        self.assertEqual(self._t.inverse_log_det_jacobian(x)[0].shape, [])
+        self.assertEqual(self._t.inverse_log_det_jacobian(x)[1].shape, [])
+        self.assertEqual(self._t.forward_shape(x.shape), [])
+        self.assertEqual(self._t.inverse_shape(x.shape), [])
+
 
 @param.place(config.DEVICES)
 @param.param_cls(
@@ -297,6 +308,18 @@ class TestAffineTransform(unittest.TestCase):
             np.broadcast(np.random.random(shape), self.loc, self.scale).shape,
         )
 
+    @param.param_func([(np.array(1.0), np.array(1.0))])
+    def test_zerodim(self, input, expected):
+        affine = transform.AffineTransform(paddle.zeros([]), paddle.ones([]))
+
+        x = paddle.to_tensor(input).astype('float32')
+        self.assertEqual(affine.forward(x).shape, [])
+        self.assertEqual(affine.inverse(x).shape, [])
+        self.assertEqual(affine.forward_log_det_jacobian(x).shape, [])
+        self.assertEqual(affine.inverse_log_det_jacobian(x).shape, [])
+        self.assertEqual(affine.forward_shape(x.shape), ())
+        self.assertEqual(affine.inverse_shape(x.shape), ())
+
 
 @param.place(config.DEVICES)
 class TestExpTransform(unittest.TestCase):
@@ -395,6 +418,16 @@ class TestExpTransform(unittest.TestCase):
     def test_inverse_shape(self, shape, expected_shape):
         self.assertEqual(self._t.forward_shape(shape), expected_shape)
 
+    @param.param_func([(np.array(1.0), np.array(1.0))])
+    def test_zerodim(self, input, expected):
+        x = paddle.to_tensor(input).astype('float32')
+        self.assertEqual(self._t.forward(x).shape, [])
+        self.assertEqual(self._t.inverse(x).shape, [])
+        self.assertEqual(self._t.forward_log_det_jacobian(x).shape, [])
+        self.assertEqual(self._t.inverse_log_det_jacobian(x).shape, [])
+        self.assertEqual(self._t.forward_shape(x.shape), [])
+        self.assertEqual(self._t.inverse_shape(x.shape), [])
+
 
 @param.place(config.DEVICES)
 class TestChainTransform(unittest.TestCase):
@@ -785,6 +818,18 @@ class TestPowerTransform(unittest.TestCase):
     def test_inverse_shape(self, shape, expected_shape):
         self.assertEqual(self._t.forward_shape(shape), expected_shape)
 
+    @param.param_func([(np.array(2.0), np.array(1.0))])
+    def test_zerodim(self, input, expected):
+        power = transform.PowerTransform(paddle.full([], 2.0))
+
+        x = paddle.to_tensor(input).astype('float32')
+        self.assertEqual(power.forward(x).shape, [])
+        self.assertEqual(power.inverse(x).shape, [])
+        self.assertEqual(power.forward_log_det_jacobian(x).shape, [])
+        self.assertEqual(power.inverse_log_det_jacobian(x).shape, [])
+        self.assertEqual(power.forward_shape(x.shape), ())
+        self.assertEqual(power.inverse_shape(x.shape), ())
+
 
 @param.place(config.DEVICES)
 class TestTanhTransform(unittest.TestCase):
@@ -892,6 +937,16 @@ class TestTanhTransform(unittest.TestCase):
     def test_inverse_shape(self, shape, expected_shape):
         self.assertEqual(self._t.forward_shape(shape), expected_shape)
 
+    @param.param_func([(np.array(1.0), np.array(1.0))])
+    def test_zerodim(self, input, expected):
+        x = paddle.to_tensor(input).astype('float32')
+        self.assertEqual(self._t.forward(x).shape, [])
+        self.assertEqual(self._t.inverse(x).shape, [])
+        self.assertEqual(self._t.forward_log_det_jacobian(x).shape, [])
+        self.assertEqual(self._t.inverse_log_det_jacobian(x).shape, [])
+        self.assertEqual(self._t.forward_shape(x.shape), [])
+        self.assertEqual(self._t.inverse_shape(x.shape), [])
+
 
 @param.place(config.DEVICES)
 @param.param_cls(
@@ -965,6 +1020,20 @@ class TestReshapeTransform(unittest.TestCase):
         with self.assertRaises(exc):
             self._t.inverse_shape(shape)
 
+    @param.param_func([(np.array(2.0), np.array(1.0))])
+    def test_zerodim(self, input, expected):
+        reshape = transform.ReshapeTransform((), (1, 1))
+
+        x = paddle.to_tensor(input).astype('float32')
+        out = reshape.forward(x)
+
+        self.assertEqual(out.shape, [1, 1])
+        self.assertEqual(reshape.inverse(out).shape, [])
+        # self.assertEqual(reshape.forward_log_det_jacobian(x).shape, [])
+        # self.assertEqual(reshape.inverse_log_det_jacobian(out).shape, [])
+        self.assertEqual(reshape.forward_shape(x.shape), (1, 1))
+        self.assertEqual(reshape.inverse_shape(out.shape), ())
+
 
 def _np_softplus(x, beta=1.0, threshold=20.0):
     if np.any(beta * x > threshold):
@@ -1031,6 +1100,16 @@ class TestSigmoidTransform(unittest.TestCase):
     def test_inverse_shape(self, shape, expected_shape):
         self.assertEqual(self._t.forward_shape(shape), expected_shape)
 
+    @param.param_func([(np.array(1.0), np.array(1.0))])
+    def test_zerodim(self, input, expected):
+        x = paddle.to_tensor(input).astype('float32')
+        self.assertEqual(self._t.forward(x).shape, [])
+        self.assertEqual(self._t.inverse(x).shape, [])
+        self.assertEqual(self._t.forward_log_det_jacobian(x).shape, [])
+        self.assertEqual(self._t.inverse_log_det_jacobian(x).shape, [])
+        self.assertEqual(self._t.forward_shape(x.shape), [])
+        self.assertEqual(self._t.inverse_shape(x.shape), [])
+
 
 class TestSoftmaxTransform(unittest.TestCase):
     def setUp(self):
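The two commented-out assertions in `TestReshapeTransform.test_zerodim` match the TODO left in `ReshapeTransform._forward_log_det_jacobian`, which still falls back to shape `[1]` when the whole input is consumed by the event shape. A standalone repro of what the new `test_zerodim` cases assert, assuming this patch is applied (not part of the test suite):

    import numpy as np
    import paddle
    from paddle.distribution import transform

    x = paddle.to_tensor(np.array(1.0)).astype('float32')  # 0-D input

    t = transform.ExpTransform()
    assert t.forward(x).shape == []                  # exp preserves 0-D shape
    assert t.inverse(x).shape == []
    assert t.forward_log_det_jacobian(x).shape == []

    reshape = transform.ReshapeTransform((), (1, 1))  # 0-D event -> [1, 1]
    out = reshape.forward(x)
    assert out.shape == [1, 1]
    assert reshape.inverse(out).shape == []
    assert reshape.forward_shape(x.shape) == (1, 1)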