diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 144b1920fd8a5cd66be2d8a07e49fd8598ef10c7..908e06b96e493c825537e81c09caf992bb2a4608 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -187,7 +187,6 @@ from .tensor.math import logsumexp #DEFINE_ALIAS
 from .tensor.math import inverse #DEFINE_ALIAS
 from .tensor.math import log1p #DEFINE_ALIAS
 from .tensor.math import erf #DEFINE_ALIAS
-# from .tensor.math import addcmul #DEFINE_ALIAS
 from .tensor.math import addmm #DEFINE_ALIAS
 from .tensor.math import clip #DEFINE_ALIAS
 from .tensor.math import trace #DEFINE_ALIAS
diff --git a/python/paddle/fluid/tests/unittests/test_addcmul.py b/python/paddle/fluid/tests/unittests/test_addcmul.py
deleted file mode 100644
index ed466cda3864d9007813df68f3550b1a9c8750dd..0000000000000000000000000000000000000000
--- a/python/paddle/fluid/tests/unittests/test_addcmul.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-import unittest
-import numpy as np
-import paddle
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid.op import Operator
-from paddle.fluid import compiler, Program, program_guard
-from op_test import OpTest, skip_check_grad_ci
-
-
-class TestAddcmulLayer(unittest.TestCase):
-    def setUp(self):
-        self._dtype = "float64"
-        self.input = np.random.uniform(0.1, 1, [3, 100]).astype(self._dtype)
-        self.tensor1 = np.random.uniform(0.1, 1, [100]).astype(self._dtype)
-        self.tensor2 = np.random.uniform(0.1, 1, [3, 100]).astype(self._dtype)
-
-    def static(self, value=1.0):
-        prog = fluid.Program()
-        with fluid.program_guard(prog):
-            input = fluid.data(name="input", dtype=self._dtype, shape=[3, 100])
-            tensor1 = fluid.data(name="tensor1", dtype=self._dtype, shape=[100])
-            tensor2 = fluid.data(
-                name="tensor2", dtype=self._dtype, shape=[3, 100])
-            out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value)
-
-        exe = fluid.Executor(self._place)
-        return exe.run(feed={
-            "input": self.input,
-            "tensor1": self.tensor1,
-            "tensor2": self.tensor2
-        },
-                       program=prog,
-                       fetch_list=[out])[0]
-
-    def dynamic(self, value=1.0):
-        with fluid.dygraph.guard(self._place):
-            input = fluid.dygraph.to_variable(self.input)
-            tensor1 = fluid.dygraph.to_variable(self.tensor1)
-            tensor2 = fluid.dygraph.to_variable(self.tensor2)
-            out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value)
-            return out.numpy()
-
-    def numpy(self, value=1.0):
-        self.out = np.add(self.input,
-                          np.multiply(self.tensor1, self.tensor2) * value)
-        return self.out
-
-    def test_equal(self):
-        places = []
-        if fluid.core.is_compiled_with_cuda():
-            places.append(fluid.CUDAPlace(0))
-        for place in places:
-            self._place = place
-            self.assertTrue(np.allclose(self.numpy(), self.static()))
-            self.assertTrue(
-                np.allclose(
-                    self.numpy(value=0.9), self.dynamic(value=0.9)))
-            self.assertTrue(
-                np.allclose(
-                    self.numpy(value=0), self.dynamic(value=0)))
-
-
-class TestAddcmul(unittest.TestCase):
-    def test_addcmul(self):
-        program = Program()
-        with program_guard(program):
-            data_shape = [3, 64, 64]
-            input = fluid.data(name='in', shape=data_shape, dtype='float32')
-            tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32')
-            tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32')
-
-            out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
-            self.assertEqual(out.shape, input.shape)
-
-    def test_addcmul_with_broadcast0(self):
-        program = Program()
-        with program_guard(program):
-            input = fluid.data(name='in', shape=[3, 100], dtype='float32')
-            tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32')
-            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
-
-            out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
-            self.assertEqual(out.shape, input.shape)
-
-    def test_addcmul_with_broadcast1(self):
-        program = Program()
-        with program_guard(program):
-            input = fluid.data(name='in', shape=[4, 100], dtype='float32')
-            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
-            tensor2 = fluid.data(name='t2', shape=[4, 100], dtype='float32')
-
-            out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
-            self.assertEqual(out.shape, input.shape)
-
-    def test_addcmul_with_broadcast2(self):
-        program = Program()
-        with program_guard(program):
-            input = fluid.data(name='in', shape=[4, 100], dtype='float32')
-            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
-            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
-
-            out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
-            self.assertEqual(out.shape, input.shape)
-
-
-class InvalidInputTest(unittest.TestCase):
-    def test_error(self):
-        def test_invalid_input():
-            program = Program()
-            with program_guard(program):
-                input = [20, 20]
-                tensor1 = fluid.data(
-                    name='tensor1', shape=[20, 20], dtype='float32')
-                tensor2 = fluid.data(
-                    name='tensor2', shape=[20, 20], dtype='float32')
-                out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
-
-        self.assertRaises(TypeError, test_invalid_input)
-
-        def test_invalid_tensor1():
-            program = Program()
-            with program_guard(program):
-                input = fluid.data(
-                    name='input', shape=[20, 20], dtype='float32')
-                tensor1 = [20, 20]
-                tensor2 = fluid.data(
-                    name='tensor2', shape=[20, 20], dtype='float32')
-                out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
-
-        self.assertRaises(TypeError, test_invalid_tensor1)
-
-        def test_invalid_tensor2():
-            program = Program()
-            with program_guard(program):
-                input = fluid.data(
-                    name='input', shape=[20, 20], dtype='float32')
-                tensor1 = fluid.data(
-                    name='tensor1', shape=[20, 20], dtype='float32')
-                tensor2 = [20, 20]
-                out = paddle.tensor.math.addcmul(input, tensor1, tensor2)
-
-        self.assertRaises(TypeError, test_invalid_tensor2)
-
-        def test_invalid_value_int():
-            program = Program()
-            with program_guard(program):
-                input = fluid.data(
-                    name='input', shape=[20, 20], dtype='float32')
-                tensor1 = fluid.data(
-                    name='tensor1', shape=[20, 20], dtype='float32')
-                tensor2 = fluid.data(
-                    name='tensor2', shape=[20, 20], dtype='float32')
-                out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=1)
-
-        self.assertRaises(TypeError, test_invalid_value_int)
-
-        def test_invalid_value_float():
-            program = Program()
-            with program_guard(program):
-                input = fluid.data(name='input', shape=[20, 20], dtype='int32')
-                tensor1 = fluid.data(
-                    name='tensor1', shape=[20, 20], dtype='int32')
-                tensor2 = fluid.data(
-                    name='tensor2', shape=[20, 20], dtype='int32')
-                out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=1.0)
-
-        self.assertRaises(TypeError, test_invalid_value_float)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index e045bcf515c7428452e6543cc15bc0de6da047c4..515b4024471209cb84fdf354d9167ee07aa259f6 100755
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -156,7 +156,6 @@ from .math import log2 #DEFINE_ALIAS
 from .math import log10 #DEFINE_ALIAS
 from .math import log1p #DEFINE_ALIAS
 from .math import erf #DEFINE_ALIAS
-# from .math import addcmul #DEFINE_ALIAS
 from .math import addmm #DEFINE_ALIAS
 from .math import clip #DEFINE_ALIAS
 from .math import trace #DEFINE_ALIAS
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 88af78bf993afce9cb4c32e1b7d515dbc8c875fa..80d2a4a513398ed1630014a0327efbc1d0010fe9 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -117,7 +117,6 @@ __all__ = [
         'inverse',
        'log1p',
        'erf',
-        'addcmul',
        'addmm',
        'clip',
        'trace',
@@ -283,7 +282,7 @@ def add(x, y, name=None):
 
 def subtract(x, y, name=None):
     """
-    Substract two tensors element-wise. The equation is: 
+    Substract two tensors element-wise. The equation is:
 
     .. math::
         out = x - y
@@ -302,7 +301,7 @@ def subtract(x, y, name=None):
     Examples:
 
        .. code-block:: python
-            
+
            import numpy as np
            import paddle
 
@@ -517,7 +516,7 @@ def multiply(x, y, name=None):
 
 def maximum(x, y, name=None):
     """
-    Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is: 
+    Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is:
 
     .. math::
         out = max(x, y)
@@ -576,7 +575,7 @@ def maximum(x, y, name=None):
 
 def minimum(x, y, name=None):
     """
-    Compare two tensors and returns a new tensor containing the element-wise minima. The equation is: 
+    Compare two tensors and returns a new tensor containing the element-wise minima. The equation is:
 
     .. math::
         out = min(x, y)
@@ -1174,7 +1173,7 @@ def max(x, axis=None, keepdim=False, name=None):
             print(result1)
             #[0.9]
             result2 = paddle.max(x, axis=0)
-            print(result2) 
+            print(result2)
             #[0.2 0.3 0.6 0.9]
             result3 = paddle.max(x, axis=-1)
             print(result3)
@@ -1268,7 +1267,7 @@ def min(x, axis=None, keepdim=False, name=None):
             print(result2)
             #[0.1 0.2 0.5 0.7]
             result3 = paddle.min(x, axis=-1)
-            print(result3) 
+            print(result3)
             #[0.2 0.1]
             result4 = paddle.min(x, axis=1, keepdim=True)
             print(result4)
@@ -1280,7 +1279,7 @@
             y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
                                   [[5.0, 6.0], [7.0, 8.0]]])
             result5 = paddle.min(y, axis=[1, 2])
-            print(result5) 
+            print(result5)
             #[1. 5.]
             result6 = paddle.min(y, axis=[0, 1])
             print(result6)
@@ -1454,50 +1453,6 @@ def log10(x, name=None):
     return out
 
 
-def addcmul(input, tensor1, tensor2, value=1.0, name=None):
-    """
-
-    Calculate the element-wise multiplication of tensor1 and tensor2,
-    then multiply the result by value, and add it to input. The shape of input,
-    tensor1, tensor2 should be broadcastable.
-    The equation is:
-    .. math::
-
-        out = input + value * tensor1 * tensor2
-    Args:
-        input(Tensor): The input to be added. A Tensor with type float32, float64, int32, int64.
-        tensor1(Tensor): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
-        tensor2(Tensor): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
-        value(int|float): The multiplier for tensor1*tensor2. For float32 and float64 type input, value must be float, otherwise an integer.
-        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
-                             Generally, no setting is required. Default: None.
-    Returns:
-        out(Tensor): The output result. A Tensor with the same data type as input's.
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            input = paddle.ones([2,2])
-            tensor1 = paddle.ones([2,2])
-            tensor2 = paddle.ones([2,2])
-            out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=0.5)
-            print(out)
-            # [[1.5 1.5]
-            # [1.5 1.5]]
-    """
-
-    check_variable_and_dtype(input, 'input', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
-    check_variable_and_dtype(tensor1, 'tensor1', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
-    check_variable_and_dtype(tensor2, 'tensor2', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
-    if convert_dtype(input.dtype) in ['float32', 'float64']:
-        check_type(value, 'value', float, 'addcmul')
-    if convert_dtype(input.dtype) in ['int32', 'int64']:
-        check_type(value, 'value', int, 'addcmul')
-
-    out = layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value)
-    return out
-
-
 def clip(x, min=None, max=None, name=None):
     """
     This operator clip all elements in input into the range [ min, max ] and return
diff --git a/tools/static_mode_white_list.py b/tools/static_mode_white_list.py
index 68e58445da0365ebed38c4e2893c0e4d51049d2a..05dfc9c621ee1ea37e437932b7b87884509da2e4 100644
--- a/tools/static_mode_white_list.py
+++ b/tools/static_mode_white_list.py
@@ -48,7 +48,6 @@ STATIC_MODE_TESTING_LIST = [
     'test_adaptive_max_pool1d',
     'test_add_position_encoding_op',
     'test_add_reader_dependency',
-    'test_addcmul',
     'test_addmm_op',
     'test_affine_grid_op',
     'test_allclose_layer',