From 8bb199603ab806c233b404030f07926632684452 Mon Sep 17 00:00:00 2001
From: Bai Yifan
Date: Fri, 17 Apr 2020 15:45:19 +0800
Subject: [PATCH] [Cherry-pick Release/2.0] Add addcmul, test=release/2.0
 (#23894)

* add addcmul, test=release/2.0
---
 python/paddle/__init__.py                      |   2 +-
 .../fluid/tests/unittests/test_addcmul.py      | 198 ++++++++++++++++++
 python/paddle/tensor/__init__.py               |   2 +-
 python/paddle/tensor/math.py                   |  53 ++++-
 4 files changed, 252 insertions(+), 3 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/test_addcmul.py

diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index ac7bb9d4de1..169ee014327 100644
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -142,7 +142,7 @@ from .tensor.math import logsumexp #DEFINE_ALIAS
 # from .tensor.math import inverse #DEFINE_ALIAS
 from .tensor.math import log1p #DEFINE_ALIAS
 # from .tensor.math import erf #DEFINE_ALIAS
-# from .tensor.math import addcmul #DEFINE_ALIAS
+from .tensor.math import addcmul #DEFINE_ALIAS
 from .tensor.math import addmm #DEFINE_ALIAS
 # from .tensor.attribute import rank #DEFINE_ALIAS
 # from .tensor.attribute import shape #DEFINE_ALIAS
diff --git a/python/paddle/fluid/tests/unittests/test_addcmul.py b/python/paddle/fluid/tests/unittests/test_addcmul.py
new file mode 100644
index 00000000000..981df8cbdcd
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_addcmul.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import unittest
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid.op import Operator
+from paddle.fluid import compiler, Program, program_guard
+from op_test import OpTest, skip_check_grad_ci
+
+
+class TestAddcmulLayer(unittest.TestCase):
+    def setUp(self):
+        self._dtype = "float64"
+        self.input = np.random.uniform(0.1, 1, [3, 100]).astype(self._dtype)
+        self.tensor1 = np.random.uniform(0.1, 1, [100]).astype(self._dtype)
+        self.tensor2 = np.random.uniform(0.1, 1, [3, 100]).astype(self._dtype)
+
+    def static(self, value=1.0):
+        prog = fluid.Program()
+        with fluid.program_guard(prog):
+            input = fluid.data(name="input", dtype=self._dtype, shape=[3, 100])
+            tensor1 = fluid.data(name="tensor1", dtype=self._dtype, shape=[100])
+            tensor2 = fluid.data(
+                name="tensor2", dtype=self._dtype, shape=[3, 100])
+            out = paddle.addcmul(input, tensor1, tensor2, value)
+
+        exe = fluid.Executor(self._place)
+        return exe.run(feed={
+            "input": self.input,
+            "tensor1": self.tensor1,
+            "tensor2": self.tensor2
+        },
+                       program=prog,
+                       fetch_list=[out])[0]
+
+    def dynamic(self, value=1.0):
+        with fluid.dygraph.guard(self._place):
+            input = fluid.dygraph.to_variable(self.input)
+            tensor1 = fluid.dygraph.to_variable(self.tensor1)
+            tensor2 = fluid.dygraph.to_variable(self.tensor2)
+            out = paddle.addcmul(input, tensor1, tensor2, value)
+            return out.numpy()
+
+    def numpy(self, value=1.0):
+        self.out = np.add(self.input,
+                          np.multiply(self.tensor1, self.tensor2) * value)
+        return self.out
+
+    def test_equal(self):
+        places = [fluid.CPUPlace()]
+        if fluid.core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for place in places:
+            self._place = place
+            self.assertTrue(np.allclose(self.numpy(), self.static()))
+            self.assertTrue(
+                np.allclose(
+                    self.numpy(value=0.9), self.dynamic(value=0.9)))
+            self.assertTrue(
+                np.allclose(
+                    self.numpy(value=0), self.dynamic(value=0)))
+
+
+class TestAddcmul(unittest.TestCase):
+    def test_addcmul(self):
+        program = Program()
+        with program_guard(program):
+            data_shape = [3, 64, 64]
+            input = fluid.data(name='in', shape=data_shape, dtype='float32')
+            tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32')
+            tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32')
+
+            out = paddle.addcmul(input, tensor1, tensor2)
+            self.assertEqual(out.shape, input.shape)
+
+    def test_addcmul_with_broadcast0(self):
+        program = Program()
+        with program_guard(program):
+            input = fluid.data(name='in', shape=[3, 100], dtype='float32')
+            tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32')
+            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
+
+            out = paddle.addcmul(input, tensor1, tensor2)
+            self.assertEqual(out.shape, input.shape)
+
+    def test_addcmul_with_broadcast1(self):
+        program = Program()
+        with program_guard(program):
+            input = fluid.data(name='in', shape=[4, 100], dtype='float32')
+            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
+            tensor2 = fluid.data(name='t2', shape=[4, 100], dtype='float32')
+
+            out = paddle.addcmul(input, tensor1, tensor2)
+            self.assertEqual(out.shape, input.shape)
+
+    def test_addcmul_with_broadcast2(self):
+        program = Program()
+        with program_guard(program):
+            input = fluid.data(name='in', shape=[4, 100], dtype='float32')
+            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
+            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
+
+            out = paddle.addcmul(input, tensor1, tensor2)
+            self.assertEqual(out.shape, input.shape)
+
+    def test_addcmul_has_out(self):
+        program = Program()
+        with program_guard(program):
+            input = fluid.data(name='in', shape=[4, 100], dtype='float32')
+            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
+            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
+            out = fluid.data(name='out', shape=[4, 100], dtype='float32')
+
+            out = paddle.addcmul(input, tensor1, tensor2, out=out)
+            self.assertEqual(out.shape, input.shape)
+
+
+class InvalidInputTest(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            program = Program()
+            with program_guard(program):
+                input = [20, 20]
+                tensor1 = fluid.data(
+                    name='tensor1', shape=[20, 20], dtype='float32')
+                tensor2 = fluid.data(
+                    name='tensor2', shape=[20, 20], dtype='float32')
+                out = paddle.addcmul(input, tensor1, tensor2)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_tensor1():
+            program = Program()
+            with program_guard(program):
+                input = fluid.data(
+                    name='input', shape=[20, 20], dtype='float32')
+                tensor1 = [20, 20]
+                tensor2 = fluid.data(
+                    name='tensor2', shape=[20, 20], dtype='float32')
+                out = paddle.addcmul(input, tensor1, tensor2)
+
+        self.assertRaises(TypeError, test_invalid_tensor1)
+
+        def test_invalid_tensor2():
+            program = Program()
+            with program_guard(program):
+                input = fluid.data(
+                    name='input', shape=[20, 20], dtype='float32')
+                tensor1 = fluid.data(
+                    name='tensor1', shape=[20, 20], dtype='float32')
+                tensor2 = [20, 20]
+                out = paddle.addcmul(input, tensor1, tensor2)
+
+        self.assertRaises(TypeError, test_invalid_tensor2)
+
+        def test_invalid_value_int():
+            program = Program()
+            with program_guard(program):
+                input = fluid.data(
+                    name='input', shape=[20, 20], dtype='float32')
+                tensor1 = fluid.data(
+                    name='tensor1', shape=[20, 20], dtype='float32')
+                tensor2 = fluid.data(
+                    name='tensor2', shape=[20, 20], dtype='float32')
+                out = paddle.addcmul(input, tensor1, tensor2, value=1)
+
+        self.assertRaises(TypeError, test_invalid_value_int)
+
+        def test_invalid_value_float():
+            program = Program()
+            with program_guard(program):
+                input = fluid.data(name='input', shape=[20, 20], dtype='int32')
+                tensor1 = fluid.data(
+                    name='tensor1', shape=[20, 20], dtype='int32')
+                tensor2 = fluid.data(
+                    name='tensor2', shape=[20, 20], dtype='int32')
+                out = paddle.addcmul(input, tensor1, tensor2, value=1.0)
+
+        self.assertRaises(TypeError, test_invalid_value_float)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 6d1f2997bce..04ee0215de9 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -121,7 +121,7 @@ from .math import logsumexp #DEFINE_ALIAS
 # from .math import inverse #DEFINE_ALIAS
 from .math import log1p #DEFINE_ALIAS
 # from .math import erf #DEFINE_ALIAS
-# from .math import addcmul #DEFINE_ALIAS
+from .math import addcmul #DEFINE_ALIAS
 from .math import addmm #DEFINE_ALIAS
 # from .attribute import rank #DEFINE_ALIAS
 # from .attribute import shape #DEFINE_ALIAS
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index d503f953879..c18d6304216 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -75,7 +75,7 @@ __all__ = [
 #        'inverse',
         'log1p',
 #        'erf',
-#        'addcmul',
+        'addcmul',
         'addmm'
 ]
 # yapf: enable.
@@ -1255,3 +1255,54 @@ def log1p(x, out=None, name=None):
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
     return out
+
+
+def addcmul(input, tensor1, tensor2, value=1.0, out=None, name=None):
+    """
+    Calculate the element-wise product of tensor1 and tensor2, multiply the
+    result by value, and add it to input. The shapes of input, tensor1 and
+    tensor2 must be broadcastable with each other.
+    The equation is:
+
+    .. math::
+        out = input + value * tensor1 * tensor2
+
+    Args:
+        input(Variable): The input to be added. A Tensor with type float32, float64, int32, int64.
+        tensor1(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
+        tensor2(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
+        value(int|float): The multiplier for tensor1*tensor2. It must be a float for float32/float64 inputs and an int for int32/int64 inputs.
+        out(Variable, Optional): The variable that specifies the output of the
+            operator, which can be a Variable that has already been created in
+            the program. If None, a new Variable is created to save the output.
+            Default: None.
+        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
+            Generally, no setting is required. Default: None.
+
+    Returns:
+        out(Variable): The output result. A Tensor with the same data type as input.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.fluid as fluid
+            input = fluid.data(name='input', dtype='float32', shape=[3, 4])
+            tensor1 = fluid.data(name='tensor1', dtype='float32', shape=[1, 4])
+            tensor2 = fluid.data(name='tensor2', dtype='float32', shape=[3, 4])
+            data = paddle.addcmul(input, tensor1, tensor2, value=1.0)
+    """
+
+    check_variable_and_dtype(input, 'input', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
+    check_variable_and_dtype(tensor1, 'tensor1', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
+    check_variable_and_dtype(tensor2, 'tensor2', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
+    if convert_dtype(input.dtype) in ['float32', 'float64']:
+        check_type(value, 'value', float, 'addcmul')
+    if convert_dtype(input.dtype) in ['int32', 'int64']:
+        check_type(value, 'value', int, 'addcmul')
+
+    if out is not None:
+        layers.assign(layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value), out)
+    else:
+        out = layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value)
+    return out
-- 
GitLab
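
Reviewer note (not part of the patch): a minimal sketch of how the new API can be smoke-tested against the same NumPy reference expression used in TestAddcmulLayer.numpy(), i.e. out = input + value * tensor1 * tensor2. It assumes a Paddle build that already contains this patch; the value 0.5, the shapes, and the CPU place are arbitrary choices for illustration.

    # Semantics check: out = input + value * tensor1 * tensor2
    import numpy as np
    import paddle
    import paddle.fluid as fluid

    input_np = np.random.uniform(0.1, 1, [3, 100]).astype('float64')
    tensor1_np = np.random.uniform(0.1, 1, [100]).astype('float64')   # broadcast over the first dim
    tensor2_np = np.random.uniform(0.1, 1, [3, 100]).astype('float64')
    value = 0.5

    # NumPy reference result
    expected = input_np + value * tensor1_np * tensor2_np

    # Dygraph execution of the new op, same pattern as TestAddcmulLayer.dynamic
    with fluid.dygraph.guard(fluid.CPUPlace()):
        out = paddle.addcmul(
            fluid.dygraph.to_variable(input_np),
            fluid.dygraph.to_variable(tensor1_np),
            fluid.dygraph.to_variable(tensor2_np), value)
        assert np.allclose(out.numpy(), expected)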