diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.cc b/paddle/fluid/operators/reduce_ops/logsumexp_op.cc
deleted file mode 100644
index 322a1637f5deec909db13f1bd0433446cd7606ae..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/logsumexp_op.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h"
-#include <algorithm>
-#include <string>
-#include <utility>
-#include <vector>
-
-namespace paddle {
-namespace operators {
-
-class LogsumexpOpMaker : public ops::ReduceOpMaker {
- protected:
-  virtual std::string GetName() const { return "logsumexp"; }
-  virtual std::string GetOpType() const { return "Reduce logsumexp"; }
-};
-
-template <typename T>
-class LogsumexpGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("logsumexp_grad");
-    op->SetInput("X", this->Input("X"));
-    op->SetInput("Out", this->Output("Out"));
-    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    op->SetAttrMap(this->Attrs());
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-REGISTER_OPERATOR(logsumexp, ops::ReduceOp, ops::LogsumexpOpMaker,
-                  ops::LogsumexpGradOpMaker<paddle::framework::OpDesc>,
-                  ops::LogsumexpGradOpMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(logsumexp_grad, ops::ReduceGradOp);
-
-REGISTER_OP_CPU_KERNEL(logsumexp,
-                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
-                                         float, ops::LogsumexpFunctor>,
-                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
-                                         double, ops::LogsumexpFunctor>);
-REGISTER_OP_CPU_KERNEL(
-    logsumexp_grad, ops::ReduceGradKernel<paddle::platform::CPUDeviceContext,
-                                          float, ops::LogsumexpGradFunctor>,
-    ops::ReduceGradKernel<paddle::platform::CPUDeviceContext, double,
-                          ops::LogsumexpGradFunctor>);
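Note: the deleted LogsumexpGradOpMaker wires `X`, `Out`, and `Out@GRAD` into `logsumexp_grad` because the backward pass can reuse the forward output: with `out = logsumexp(x)`, `d out / d x = exp(x - out)`, i.e. a softmax over the reduced axes. A minimal numpy sketch of that identity (the helper name `logsumexp_grad_ref` is ours, not from this patch):

```python
import numpy as np

def logsumexp_grad_ref(x, out, dout, axis, keepdim=False):
    # dx = dout * exp(x - out); expand the reduced axis so it broadcasts.
    if not keepdim:
        out = np.expand_dims(out, axis)
        dout = np.expand_dims(dout, axis)
    return dout * np.exp(x - out)

x = np.random.uniform(-1, 1, (2, 3))
out = np.log(np.exp(x).sum(axis=1))                  # forward result
dx = logsumexp_grad_ref(x, out, np.ones_like(out), axis=1)
assert np.allclose(dx.sum(axis=1), 1.0)              # each row is a softmax
```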
diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.cu b/paddle/fluid/operators/reduce_ops/logsumexp_op.cu
deleted file mode 100644
index c9ad1075c0c3c1c6f405144dbfde2e81b85124aa..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/logsumexp_op.cu
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h"
-
-REGISTER_OP_CUDA_KERNEL(logsumexp,
-                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
-                                          float, ops::LogsumexpFunctor>,
-                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
-                                          double, ops::LogsumexpFunctor>);
diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.h b/paddle/fluid/operators/reduce_ops/logsumexp_op.h
deleted file mode 100644
index 1d0e00262a37ff7160abd7a865e63377f8b30461..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/logsumexp_op.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
-
-namespace paddle {
-namespace operators {
-
-struct LogsumexpFunctor {
-  template <typename DeviceContext, typename X, typename Y, typename Dim>
-  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
-    auto x_dim = x->dimensions();
-    auto t_dim = x_dim;
-    for (int i = 0; i < static_cast<int>(dim.size()); i++) {
-      t_dim[dim[i]] = 1;
-    }
-
-    auto r_dim = x_dim;
-    for (int i = 0; i < static_cast<int>(r_dim.size()); i++) {
-      r_dim[i] = 1;
-    }
-    for (int i = 0; i < static_cast<int>(dim.size()); i++) {
-      r_dim[dim[i]] = x_dim[dim[i]];
-    }
-
-    auto y_dim = y->dimensions();
-    auto x_max = x->maximum(dim);
-    y->device(place) =
-        (x_max +
-         (*x - x_max.reshape(t_dim).broadcast(r_dim)).exp().sum(dim).log())
-            .reshape(y_dim);
-  }
-};
-
-struct LogsumexpGradFunctor {
-  template <typename DeviceContext, typename X, typename Y, typename DX,
-            typename DY, typename Dim>
-  void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
-                  const Dim& dim, int size) {
-    dx->device(place) = dy->broadcast(dim) * (*x - y->broadcast(dim)).exp();
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
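Note: the deleted LogsumexpFunctor is the standard max-subtraction formulation; `t_dim` and `r_dim` exist only to reshape the per-axis maximum and broadcast it back over the reduced axes. A rough numpy transcription (a sketch; `keepdims` broadcasting stands in for Eigen's reshape/broadcast, and `functor_equivalent` is our name):

```python
import numpy as np

def functor_equivalent(x, dim):
    dim = tuple(dim)
    x_max = x.max(axis=dim, keepdims=True)      # reshape(t_dim) analogue
    shifted = np.exp(x - x_max)                 # broadcast(r_dim) is implicit
    return np.squeeze(x_max, axis=dim) + np.log(shifted.sum(axis=dim))

x = np.random.uniform(-1, 1, (2, 3, 4))
assert np.allclose(functor_equivalent(x, (1, 2)),
                   np.log(np.exp(x).sum(axis=(1, 2))))
```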
diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.part.cu b/paddle/fluid/operators/reduce_ops/logsumexp_op.part.cu
deleted file mode 100644
index d6ad4863092a50233b806c944db0b8c161ed9dd0..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/reduce_ops/logsumexp_op.part.cu
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// .part used to speed up nvcc compile
-#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h"
-
-REGISTER_OP_CUDA_KERNEL(
-    logsumexp_grad, ops::ReduceGradKernel<paddle::platform::CUDADeviceContext,
-                                          float, ops::LogsumexpGradFunctor>,
-    ops::ReduceGradKernel<paddle::platform::CUDADeviceContext, double,
-                          ops::LogsumexpGradFunctor>);
diff --git a/python/paddle/fluid/tests/unittests/test_logsumexp.py b/python/paddle/fluid/tests/unittests/test_logsumexp.py
index c2201a52605bc87246fb9c8734494b19f83ff180..5aaf29a6e38b4ca374e76d3d8be29db2d46a0ec1 100644
--- a/python/paddle/fluid/tests/unittests/test_logsumexp.py
+++ b/python/paddle/fluid/tests/unittests/test_logsumexp.py
@@ -12,128 +12,60 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
 import paddle
+import paddle.fluid as fluid
 import unittest
 import numpy as np
 from op_test import OpTest
+from paddle.fluid import Program, program_guard
+from paddle.fluid.layer_helper import LayerHelper
 
 
-def ref_logsumexp(x, axis=None, keepdim=False, reduce_all=False):
-    if isinstance(axis, int):
-        axis = (axis, )
-    elif isinstance(axis, list):
-        axis = tuple(axis)
-    if reduce_all:
-        axis = None
-    out = np.log(np.exp(x).sum(axis=axis, keepdims=keepdim))
-    return out
-
-
-class TestLogsumexp(OpTest):
-    def setUp(self):
-        self.op_type = 'logsumexp'
-        self.shape = [2, 3, 4, 5]
-        self.dtype = 'float64'
-        self.axis = [-1]
-        self.keepdim = False
-        self.reduce_all = False
-        self.set_attrs()
-
-        np.random.seed(10)
-        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
-        out = ref_logsumexp(x, self.axis, self.keepdim, self.reduce_all)
-
-        self.inputs = {'X': x}
-        self.outputs = {'Out': out}
-        self.attrs = {
-            'dim': self.axis,
-            'keep_dim': self.keepdim,
-            'reduce_all': self.reduce_all
-        }
-
-    def set_attrs(self):
-        pass
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], ['Out'])
-
-
-class TestLogsumexp_shape(TestLogsumexp):
-    def set_attrs(self):
-        self.shape = [4, 5, 6]
-
-
-class TestLogsumexp_axis(TestLogsumexp):
-    def set_attrs(self):
-        self.axis = [0, -1]
-
-
-class TestLogsumexp_axis_all(TestLogsumexp):
-    def set_attrs(self):
-        self.axis = [0, 1, 2, 3]
-
-
-class TestLogsumexp_keepdim(TestLogsumexp):
-    def set_attrs(self):
-        self.keepdim = True
-
-
-class TestLogsumexp_reduce_all(TestLogsumexp):
-    def set_attrs(self):
-        self.reduce_all = True
-
-
-class TestLogsumexpError(unittest.TestCase):
+class TestLogSumOpError(unittest.TestCase):
     def test_errors(self):
-        with paddle.static.program_guard(paddle.static.Program()):
-            self.assertRaises(TypeError, paddle.logsumexp, 1)
-            x1 = paddle.data(name='x1', shape=[120], dtype="int32")
-            self.assertRaises(TypeError, paddle.logsumexp, x1)
-
-
-class TestLogsumexpAPI(unittest.TestCase):
-    def setUp(self):
-        self.shape = [2, 3, 4, 5]
-        self.x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
-        self.place = paddle.CUDAPlace(0) if paddle.fluid.core.is_compiled_with_cuda() \
-            else paddle.CPUPlace()
-
-    def api_case(self, axis=None, keepdim=False):
-        out_ref = ref_logsumexp(self.x, axis, keepdim)
-        with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.data('X', self.shape)
-            out = paddle.logsumexp(x, axis, keepdim)
-            exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x}, fetch_list=[out])
-        self.assertTrue(np.allclose(res[0], out_ref))
-
-        paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x)
-        out = paddle.logsumexp(x, axis, keepdim)
-        self.assertTrue(np.allclose(out.numpy(), out_ref))
-        paddle.enable_static()
-
-    def test_api(self):
-        self.api_case()
-        self.api_case(2)
-        self.api_case([-1])
-        self.api_case([2, -3])
-        self.api_case((0, 1, -1))
-        self.api_case(keepdim=True)
-
-    def test_alias(self):
-        paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x)
-        out1 = paddle.logsumexp(x)
-        out2 = paddle.tensor.logsumexp(x)
-        out3 = paddle.tensor.math.logsumexp(x)
-        out_ref = ref_logsumexp(self.x)
-        for out in [out1, out2, out3]:
-            self.assertTrue(np.allclose(out.numpy(), out_ref))
-        paddle.enable_static()
+        with program_guard(Program(), Program()):
+
+            x1 = fluid.layers.data(name='x1', shape=[120], dtype="uint8")
+            self.assertRaises(Exception, paddle.logsumexp, x1)
+
+            x2 = fluid.layers.data(name='x2', shape=[2, 3], dtype="int")
+            self.assertRaises(Exception, paddle.logsumexp, x2)
+
+            x3 = fluid.layers.data(name='x3', shape=[3], dtype="float16")
+            self.assertRaises(Exception, paddle.logsumexp, x3)
+
+
+class TestLogSumExpOp(unittest.TestCase):
+    def test_dygraph(self):
+        with fluid.dygraph.guard():
+            np_x = np.random.uniform(0.1, 1, [123]).astype(np.float32)
+            x = fluid.dygraph.to_variable(np_x)
+            self.assertTrue(
+                np.allclose(
+                    paddle.logsumexp(x).numpy(), np.log(np.sum(np.exp(np_x)))))
+
+            np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
+            x = fluid.dygraph.to_variable(np_x)
+            self.assertTrue(
+                np.allclose(
+                    paddle.logsumexp(x, [1, 2]).numpy(),
+                    np.log(np.sum(np.exp(np_x), axis=(1, 2)))))
+
+            np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
+            x = fluid.dygraph.to_variable(np_x)
+            self.assertTrue(
+                np.allclose(
+                    paddle.logsumexp(x, [2]).numpy(),
+                    np.log(np.sum(np.exp(np_x), axis=(2)))))
+
+            np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
+            x = fluid.dygraph.to_variable(np_x)
+            self.assertTrue(
+                np.allclose(
+                    paddle.logsumexp(
+                        x, keepdim=True).numpy(),
+                    np.log(np.sum(np.exp(np_x), keepdims=True))))
 
 
 if __name__ == '__main__':
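Note: the rewritten tests compare paddle.logsumexp against the naive reference `np.log(np.sum(np.exp(x), axis=...))` on inputs drawn from [0.1, 1), where `exp` cannot overflow, so the naive form is a valid oracle there. A hypothetical helper that would fold the repeated reference into one place (not part of the patch):

```python
import numpy as np

def naive_logsumexp(x, axis=None, keepdim=False):
    # Mirror paddle.logsumexp's axis handling: a list behaves like a tuple.
    axis = tuple(axis) if isinstance(axis, (list, tuple)) else axis
    return np.log(np.sum(np.exp(x), axis=axis, keepdims=keepdim))

np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
assert np.allclose(naive_logsumexp(np_x, [1, 2]),
                   np.log(np.sum(np.exp(np_x), axis=(1, 2))))
```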
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index ed2bbe03a366054dfe7d798310c7fa5d419b44a8..f4e90cf46a82cab7c3703a3d87150ff1ee0d0157 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -86,7 +86,6 @@ __all__ = [
         'floor',
         'increment',
         'log',
-        'logsumexp',
         'mul',
         'multiplex',
         'pow',
@@ -1177,35 +1176,24 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
         .. code-block:: python
 
             import paddle
+            import numpy as np
 
             paddle.disable_static()
-
-            x = paddle.to_tensor([[-1.5, 0., 2.], [3., 1.2, -2.4]])
+
+            x = np.array([[-1.5, 0., 2.], [3., 1.2, -2.4]])
+            x = paddle.to_tensor(x)
             out1 = paddle.logsumexp(x) # [3.4691226]
             out2 = paddle.logsumexp(x, 1) # [2.15317821, 3.15684602]
 
     """
-    if isinstance(axis, int):
-        axis = [axis]
-    reduce_all = True if axis is None \
-        or len(axis)==0 \
-        or len(axis) == len(x.shape) else False
-    if axis is None or len(axis) == 0:
-        axis = [0]
-
-    if in_dygraph_mode():
-        return core.ops.logsumexp(x, 'dim', axis, 'keep_dim', keepdim,
-                                  'reduce_all', reduce_all)
-
-    check_variable_and_dtype(x, 'x',
+    if not in_dygraph_mode():
+        check_variable_and_dtype(x, 'x',
                                  ['float32', 'float64'], 'logsumexp')
-
-    helper = LayerHelper('logsumexp', **locals())
-    attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
+
+    out = paddle.exp(x, name)
+    out = paddle.sum(out, axis=axis, keepdim=keepdim, name=name)
+    out = paddle.log(out, name)
     return out
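Note: the new Python-level body composes `exp -> sum -> log`, which is mathematically equal to logsumexp but, unlike the deleted fused kernel, no longer subtracts the per-axis maximum first, so it can overflow for inputs of large magnitude. A small numpy illustration of the difference (ours, not from the patch):

```python
import numpy as np

x = np.array([100.0, 100.0], dtype=np.float32)

naive = np.log(np.exp(x).sum())                    # inf: exp(100) exceeds float32 max
x_max = x.max()
shifted = x_max + np.log(np.exp(x - x_max).sum())  # ~100.6931, finite

print(naive, shifted)
```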