diff --git a/paddle/fluid/operators/math/softmax.cc b/paddle/fluid/operators/math/softmax.cc
index 78c65af24a8c5fa57e33415acc3018790bf70790..fa2018178f44ff4e3b14937c1f508fa8a698e20e 100644
--- a/paddle/fluid/operators/math/softmax.cc
+++ b/paddle/fluid/operators/math/softmax.cc
@@ -19,8 +19,10 @@ namespace paddle {
 namespace operators {
 namespace math {
 
-template class SoftmaxFunctor<platform::CPUDeviceContext, float>;
-template class SoftmaxFunctor<platform::CPUDeviceContext, double>;
+template class SoftmaxFunctor<platform::CPUDeviceContext, float, true>;
+template class SoftmaxFunctor<platform::CPUDeviceContext, float, false>;
+template class SoftmaxFunctor<platform::CPUDeviceContext, double, true>;
+template class SoftmaxFunctor<platform::CPUDeviceContext, double, false>;
 template class SoftmaxGradFunctor<platform::CPUDeviceContext, float>;
 template class SoftmaxGradFunctor<platform::CPUDeviceContext, double>;
 
diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu
index ce183ed3649055aab31eb6e3f44f2224475957e9..2e9669049e36478549b793e3fa76220825888e21 100644
--- a/paddle/fluid/operators/math/softmax.cu
+++ b/paddle/fluid/operators/math/softmax.cu
@@ -98,9 +98,14 @@ template class SoftmaxGradCUDNNFunctor<platform::float16>;
 template class SoftmaxGradCUDNNFunctor<float>;
 template class SoftmaxGradCUDNNFunctor<double>;
 
-template class SoftmaxFunctor<platform::CUDADeviceContext, platform::float16>;
-template class SoftmaxFunctor<platform::CUDADeviceContext, float>;
-template class SoftmaxFunctor<platform::CUDADeviceContext, double>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, platform::float16,
+                              false>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, platform::float16,
+                              true>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, float, false>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, float, true>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, double, false>;
+template class SoftmaxFunctor<platform::CUDADeviceContext, double, true>;
 template class SoftmaxGradFunctor<platform::CUDADeviceContext, float>;
 template class SoftmaxGradFunctor<platform::CUDADeviceContext, double>;
 template class SoftmaxGradFunctor<platform::CUDADeviceContext,
diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h
--- a/paddle/fluid/operators/math/softmax.h
+++ b/paddle/fluid/operators/math/softmax.h
@@ -21,7 +21,7 @@ namespace operators {
 namespace math {
 
-template <typename DeviceContext, typename T>
+template <typename DeviceContext, typename T, bool is_test>
 class SoftmaxFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::Tensor* X,
                   framework::Tensor* Y);
diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h
index dd9971ba091cc3ece86654f65c335b98087f45ed..7cf98f27251db3cfe5e8e295ed21056f6e5a2963 100644
--- a/paddle/fluid/operators/math/softmax_impl.h
+++ b/paddle/fluid/operators/math/softmax_impl.h
@@ -32,10 +32,10 @@ struct ValueClip {
   }
 };
 
-template <typename DeviceContext, typename T>
-void SoftmaxFunctor<DeviceContext, T>::operator()(const DeviceContext& context,
-                                                  const framework::Tensor* X,
-                                                  framework::Tensor* Y) {
+template <typename DeviceContext, typename T, bool is_test>
+void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
+    const DeviceContext& context, const framework::Tensor* X,
+    framework::Tensor* Y) {
   auto logits = EigenMatrix<T>::From(*X);
   auto softmax = EigenMatrix<T>::From(*Y);
 
@@ -65,6 +65,39 @@ void SoftmaxFunctor<DeviceContext, T>::operator()(const DeviceContext& context,
                   .broadcast(one_by_class));
 }
 
+template <typename DeviceContext, typename T>
+class SoftmaxFunctor<DeviceContext, T, true> {
+  void operator()(const DeviceContext& context, const framework::Tensor* X,
+                  framework::Tensor* Y) {
+    auto logits = EigenMatrix<T>::From(*X);
+    auto softmax = EigenMatrix<T>::From(*Y);
+
+    const int kBatchDim = 0;
+    const int kClassDim = 1;
+
+    const int batch_size = logits.dimension(kBatchDim);
+    const int num_classes = logits.dimension(kClassDim);
+
+    Eigen::DSizes<int, 1> along_class(kClassDim);
+    Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
+    Eigen::DSizes<int, 2> one_by_class(1, num_classes);
+
+    auto shifted_logits = (logits -
+                           logits.maximum(along_class)
+                               .eval()
+                               .reshape(batch_by_one)
+                               .broadcast(one_by_class));
+
+    softmax.device(*context.eigen_device()) = shifted_logits.exp();
+    softmax.device(*context.eigen_device()) = (softmax *
+                                               softmax.sum(along_class)
+                                                   .inverse()
+                                                   .eval()
+                                                   .reshape(batch_by_one)
+                                                   .broadcast(one_by_class));
+  }
+};
+
 template <typename DeviceContext, typename T>
 void SoftmaxGradFunctor<DeviceContext, T>::operator()(
     const DeviceContext& context, const framework::Tensor* y,
diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h
index cf1eeb017d666f605a431aa54637d8cbc99c7c46..bcd63eefc7856d529856378a1f8e2537df94fd9b 100644
--- a/paddle/fluid/operators/softmax_op.h
+++ b/paddle/fluid/operators/softmax_op.h
@@ -35,8 +35,14 @@ class SoftmaxKernel : public framework::OpKernel<T> {
     Tensor X_2d = framework::ReshapeToMatrix(*X, rank - 1);
     Tensor Out_2d = framework::ReshapeToMatrix(*Out, rank - 1);
 
-    math::SoftmaxFunctor<DeviceContext, T>()(
-        context.template device_context<DeviceContext>(), &X_2d, &Out_2d);
+    const bool is_test = context.Attr<bool>("is_test");
+    if (is_test == true) {
+      math::SoftmaxFunctor<DeviceContext, T, true>()(
+          context.template device_context<DeviceContext>(), &X_2d, &Out_2d);
+    } else {
+      math::SoftmaxFunctor<DeviceContext, T, false>()(
+          context.template device_context<DeviceContext>(), &X_2d, &Out_2d);
+    }
   }
 };
 
diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
index e9aba3b37b8cc01d4fe5de5200579d4e93f67e56..c0530e3d8bc407ddd6d7bf6e10a715185d0beb1f 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.h
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
@@ -42,8 +42,8 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel<T> {
 
     auto& dev_ctx =
         context.template device_context<platform::CPUDeviceContext>();
-    math::SoftmaxFunctor<platform::CPUDeviceContext, T>()(dev_ctx, logits,
-                                                          softmax);
+    math::SoftmaxFunctor<platform::CPUDeviceContext, T, false>()(
+        dev_ctx, logits, softmax);
     math::CrossEntropyFunctor<platform::CPUDeviceContext, T>()(
         dev_ctx, loss, softmax, labels, context.Attr<bool>("soft_label"),
         context.Attr<int>("ignore_index"));
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index 40c3135183a128cd9b7324ce27da798fa2d93afd..3bef24430d9aaeb459555312da0c10a5729fd4bc 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -35,6 +35,7 @@ class TestSoftmaxOp(OpTest):
         self.op_type = "softmax"
         self.use_cudnn = False
         self.use_mkldnn = False
+        self.is_test = False
         self.dtype = np.float32
         self.init_kernel_type()
         self.shape = self.get_x_shape()
@@ -48,7 +49,8 @@ class TestSoftmaxOp(OpTest):
         self.outputs = {'Out': out}
         self.attrs = {
             'use_cudnn': self.use_cudnn,
-            'use_mkldnn': self.use_mkldnn
+            'use_mkldnn': self.use_mkldnn,
+            'is_test': self.is_test
         }
 
     def init_kernel_type(self):
@@ -144,6 +146,11 @@ class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
         return [2, 3, 4, 5]
 
 
+class TestSoftmaxInference(TestSoftmaxOp):
+    def init_kernel_type(self):
+        self.is_test = True
+
+
 class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
     def init_kernel_type(self):
         self.use_mkldnn = True
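
Note on the dispatch pattern used above: the diff turns is_test into a compile-time bool template parameter of SoftmaxFunctor and adds a partial specialization for the inference case that skips the ValueClip step; the kernel then branches once on the runtime "is_test" attribute and instantiates the matching specialization. The following is a minimal standalone sketch of that idea, not PaddlePaddle code: NaiveSoftmax and kClipThreshold are hypothetical names introduced only for illustration, and plain std::vector stands in for the Eigen tensors the real functors operate on.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Generic (training) path: shifted logits are clipped before exponentiation,
// mirroring the ValueClip step kept in the primary template in softmax_impl.h.
template <typename T, bool is_test>
struct NaiveSoftmax {
  void operator()(const std::vector<T>& logits, std::vector<T>* out) const {
    const T kClipThreshold = static_cast<T>(-64.0);  // hypothetical bound
    const T max_logit = *std::max_element(logits.begin(), logits.end());
    T sum = 0;
    out->resize(logits.size());
    for (size_t i = 0; i < logits.size(); ++i) {
      T shifted = std::max(logits[i] - max_logit, kClipThreshold);
      (*out)[i] = std::exp(shifted);
      sum += (*out)[i];
    }
    for (T& v : *out) v /= sum;
  }
};

// Inference path (is_test == true): same max-shift / exp / normalize math,
// but without the clipping step, analogous to the new
// SoftmaxFunctor<DeviceContext, T, true> specialization.
template <typename T>
struct NaiveSoftmax<T, true> {
  void operator()(const std::vector<T>& logits, std::vector<T>* out) const {
    const T max_logit = *std::max_element(logits.begin(), logits.end());
    T sum = 0;
    out->resize(logits.size());
    for (size_t i = 0; i < logits.size(); ++i) {
      (*out)[i] = std::exp(logits[i] - max_logit);
      sum += (*out)[i];
    }
    for (T& v : *out) v /= sum;
  }
};

int main() {
  const std::vector<float> logits = {1.0f, 2.0f, 3.0f};
  std::vector<float> out;

  // A runtime flag picks the compile-time specialization, mirroring the
  // if/else added to SoftmaxKernel in softmax_op.h.
  const bool is_test = true;
  if (is_test) {
    NaiveSoftmax<float, true>()(logits, &out);
  } else {
    NaiveSoftmax<float, false>()(logits, &out);
  }
  for (float v : out) std::printf("%f\n", v);
  return 0;
}

Making is_test a template parameter rather than a flag checked inside the functor keeps the training and inference paths as separate instantiations, so the inference instantiation carries no clipping code at all.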