From 5b9c62faee4c3c24a6e5c072cd1c81407aeeb237 Mon Sep 17 00:00:00 2001
From: Tao Luo
Date: Wed, 14 Nov 2018 09:38:57 +0800
Subject: [PATCH] Revert "Softmax op optimization for inference "

---
 paddle/fluid/operators/math/softmax.cc             |  6 +--
 paddle/fluid/operators/math/softmax.cu             | 11 ++---
 paddle/fluid/operators/math/softmax.h              |  2 +-
 paddle/fluid/operators/math/softmax_impl.h         | 41 ++-----------------
 paddle/fluid/operators/softmax_op.h                | 10 +----
 .../operators/softmax_with_cross_entropy_op.h      |  4 +-
 .../fluid/tests/unittests/test_softmax_op.py       |  9 +---
 7 files changed, 15 insertions(+), 68 deletions(-)

diff --git a/paddle/fluid/operators/math/softmax.cc b/paddle/fluid/operators/math/softmax.cc
index fa2018178f..78c65af24a 100644
--- a/paddle/fluid/operators/math/softmax.cc
+++ b/paddle/fluid/operators/math/softmax.cc
@@ -19,10 +19,8 @@ namespace paddle {
 namespace operators {
 namespace math {
 
-template class SoftmaxFunctor<platform::CPUDeviceContext, float, false>;
-template class SoftmaxFunctor<platform::CPUDeviceContext, float, true>;
-template class SoftmaxFunctor<platform::CPUDeviceContext, double, false>;
-template class SoftmaxFunctor<platform::CPUDeviceContext, double, true>;
+template class SoftmaxFunctor<platform::CPUDeviceContext, float>;
+template class SoftmaxFunctor<platform::CPUDeviceContext, double>;
 template class SoftmaxGradFunctor<platform::CPUDeviceContext, float>;
 template class SoftmaxGradFunctor<platform::CPUDeviceContext, double>;
 
diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu
index 2e9669049e..ce183ed364 100644
--- a/paddle/fluid/operators/math/softmax.cu
+++ b/paddle/fluid/operators/math/softmax.cu
@@ -98,14 +98,9 @@ template class SoftmaxGradCUDNNFunctor<float>;
 template class SoftmaxGradCUDNNFunctor<double>;
 template class SoftmaxGradCUDNNFunctor<platform::float16>;
 
-template class SoftmaxFunctor<platform::CUDADeviceContext,
-                              platform::float16, false>;
-template class SoftmaxFunctor<platform::CUDADeviceContext,
-                              platform::float16, true>;
-template class SoftmaxFunctor<platform::CUDADeviceContext, float, false>;
-template class SoftmaxFunctor<platform::CUDADeviceContext, float, true>;
-template class SoftmaxFunctor<platform::CUDADeviceContext, double, false>;
-template class SoftmaxFunctor<platform::CUDADeviceContext, double, true>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, platform::float16>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, float>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, double>;
 template class SoftmaxGradFunctor<platform::CUDADeviceContext, float>;
 template class SoftmaxGradFunctor<platform::CUDADeviceContext, double>;
 template class SoftmaxGradFunctor<platform::CUDADeviceContext,
diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h
--- a/paddle/fluid/operators/math/softmax.h
+++ b/paddle/fluid/operators/math/softmax.h
@@ -19,7 +19,7 @@ namespace paddle {
 namespace operators {
 namespace math {
 
-template <typename DeviceContext, typename T, bool is_test>
+template <typename DeviceContext, typename T>
 class SoftmaxFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::Tensor* X,
diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h
index 7cf98f2725..dd9971ba09 100644
--- a/paddle/fluid/operators/math/softmax_impl.h
+++ b/paddle/fluid/operators/math/softmax_impl.h
@@ -32,10 +32,10 @@ struct ValueClip {
   }
 };
 
-template <typename DeviceContext, typename T, bool is_test>
-void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
-    const DeviceContext& context, const framework::Tensor* X,
-    framework::Tensor* Y) {
+template <typename DeviceContext, typename T>
+void SoftmaxFunctor<DeviceContext, T>::operator()(const DeviceContext& context,
+                                                  const framework::Tensor* X,
+                                                  framework::Tensor* Y) {
   auto logits = EigenMatrix<T>::From(*X);
   auto softmax = EigenMatrix<T>::From(*Y);
 
@@ -65,39 +65,6 @@ void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
                                                  .reshape(batch_by_one)
                                                  .broadcast(one_by_class));
 }
 
-template <typename DeviceContext>
-class SoftmaxFunctor<DeviceContext, float, true> {
-  void operator()(const DeviceContext& context, const framework::Tensor* X,
-                  framework::Tensor* Y) {
-    auto logits = EigenMatrix<float>::From(*X);
-    auto softmax = EigenMatrix<float>::From(*Y);
-
-    const int kBatchDim = 0;
-    const int kClassDim = 1;
-
-    const int batch_size = logits.dimension(kBatchDim);
-    const int num_classes = logits.dimension(kClassDim);
-
-    Eigen::DSizes<int, 1> along_class(kClassDim);
-    Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
-    Eigen::DSizes<int, 2> one_by_class(1, num_classes);
-
-    auto shifted_logits = (logits -
-                           logits.maximum(along_class)
-                               .eval()
-                               .reshape(batch_by_one)
-                               .broadcast(one_by_class));
-
-    softmax.device(*context.eigen_device()) = shifted_logits.exp();
-    softmax.device(*context.eigen_device()) = (softmax *
-                                               softmax.sum(along_class)
-                                                   .inverse()
-                                                   .eval()
-                                                   .reshape(batch_by_one)
-                                                   .broadcast(one_by_class));
-  }
-};
-
 template <typename DeviceContext, typename T>
 void SoftmaxGradFunctor<DeviceContext, T>::operator()(
     const DeviceContext& context, const framework::Tensor* y,
diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h
index bcd63eefc7..cf1eeb017d 100644
--- a/paddle/fluid/operators/softmax_op.h
+++ b/paddle/fluid/operators/softmax_op.h
@@ -35,14 +35,8 @@ class SoftmaxKernel : public framework::OpKernel<T> {
     Tensor X_2d = framework::ReshapeToMatrix(*X, rank - 1);
     Tensor Out_2d = framework::ReshapeToMatrix(*Out, rank - 1);
 
-    const bool is_test = context.Attr<bool>("is_test");
-    if (is_test == true) {
-      math::SoftmaxFunctor<DeviceContext, T, true>()(
-          context.template device_context<DeviceContext>(), &X_2d, &Out_2d);
-    } else {
-      math::SoftmaxFunctor<DeviceContext, T, false>()(
-          context.template device_context<DeviceContext>(), &X_2d, &Out_2d);
-    }
+    math::SoftmaxFunctor<DeviceContext, T>()(
+        context.template device_context<DeviceContext>(), &X_2d, &Out_2d);
   }
 };
 
diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
index c0530e3d8b..e9aba3b37b 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.h
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
@@ -42,8 +42,8 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel<T> {
 
     auto& dev_ctx =
         context.template device_context<platform::CPUDeviceContext>();
-    math::SoftmaxFunctor<platform::CPUDeviceContext, T, false>()(
-        dev_ctx, logits, softmax);
+    math::SoftmaxFunctor<platform::CPUDeviceContext, T>()(dev_ctx, logits,
+                                                          softmax);
     math::CrossEntropyFunctor<platform::CPUDeviceContext, T>()(
         dev_ctx, loss, softmax, labels, context.Attr<bool>("soft_label"),
         context.Attr<int>("ignore_index"));
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index 3bef24430d..40c3135183 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -35,7 +35,6 @@ class TestSoftmaxOp(OpTest):
         self.op_type = "softmax"
         self.use_cudnn = False
         self.use_mkldnn = False
-        self.is_test = False
         self.dtype = np.float32
         self.init_kernel_type()
         self.shape = self.get_x_shape()
@@ -49,8 +48,7 @@ class TestSoftmaxOp(OpTest):
         self.outputs = {'Out': out}
         self.attrs = {
             'use_cudnn': self.use_cudnn,
-            'use_mkldnn': self.use_mkldnn,
-            'is_test': self.is_test
+            'use_mkldnn': self.use_mkldnn
         }
 
     def init_kernel_type(self):
@@ -146,11 +144,6 @@ class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
         return [2, 3, 4, 5]
 
 
-class TestSoftmaxInference(TestSoftmaxOp):
-    def init_kernel_type(self):
-        self.is_test = True
-
-
class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
    def init_kernel_type(self):
        self.use_mkldnn = True
-- 
GitLab
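Note (not part of the patch itself): the specialization deleted from softmax_impl.h duplicated the generic functor's shift, exponentiate, and normalize steps, so reverting it does not change what the softmax op computes. A minimal NumPy sketch of that row-wise, numerically stable softmax follows; the helper name stable_softmax_2d is made up for illustration and is not a Paddle API.

import numpy as np

def stable_softmax_2d(x):
    # Subtract the per-row maximum before exponentiating so exp() cannot
    # overflow, then normalize each row to sum to 1. These are the same
    # steps the Eigen expressions in softmax_impl.h perform on a
    # [batch, class] matrix.
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)

# Example: a [batch=2, classes=3] logits matrix; each output row sums to 1.
logits = np.array([[1.0, 2.0, 3.0], [10.0, 10.0, 10.0]])
print(stable_softmax_2d(logits))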