From 9b0eae3023e3faf6a40a69f5ff79bcc2303c674b Mon Sep 17 00:00:00 2001
From: Jacek Czaja <jacek.czaja@intel.com>
Date: Sun, 18 Nov 2018 13:27:17 +0100
Subject: [PATCH] - Removing partial specialization of softmax for inference
 for GPU

test=develop
---
 paddle/fluid/operators/math/softmax.h      |  3 ++-
 paddle/fluid/operators/math/softmax_impl.h | 10 +++++++---
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h
index bf698dc2f7..089458e957 100644
--- a/paddle/fluid/operators/math/softmax.h
+++ b/paddle/fluid/operators/math/softmax.h
@@ -19,7 +19,8 @@ namespace paddle {
 namespace operators {
 namespace math {
 
-template <typename DeviceContext, typename T, bool is_test>
+template <typename DeviceContext, typename T, bool is_test,
+          typename Enable = void>
 class SoftmaxFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::Tensor* X,
diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h
index e09a243347..0f3e5b2008 100644
--- a/paddle/fluid/operators/math/softmax_impl.h
+++ b/paddle/fluid/operators/math/softmax_impl.h
@@ -33,8 +33,8 @@ struct ValueClip {
   }
 };
 
-template <typename DeviceContext, typename T, bool is_test>
-void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
+template <typename DeviceContext, typename T, bool is_test, typename Enable>
+void SoftmaxFunctor<DeviceContext, T, is_test, Enable>::operator()(
     const DeviceContext& context, const framework::Tensor* X,
     framework::Tensor* Y) {
   auto logits = EigenMatrix<T>::From(*X);
@@ -66,8 +66,12 @@ void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
           .broadcast(one_by_class));
 }
 
+template <typename DeviceContext>
+using enable_if_CPU = typename std::enable_if<
+    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type;
+
 template <typename DeviceContext>
-class SoftmaxFunctor<DeviceContext, float, true> {
+class SoftmaxFunctor<DeviceContext, float, true, enable_if_CPU<DeviceContext>> {
   void operator()(const DeviceContext& context, const framework::Tensor* X,
                   framework::Tensor* Y) {
     auto in_dims = X->dims();
-- 
GitLab
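
Note on the technique: the primary SoftmaxFunctor template gains a defaulted Enable parameter, and the float/is_test=true partial specialization is gated with std::enable_if so it participates in overload resolution only when DeviceContext is platform::CPUDeviceContext; CUDA instantiations fall back to the generic implementation. Below is a minimal, self-contained sketch of the same enable_if/SFINAE pattern. The CPUDeviceContext and CUDADeviceContext structs and the printed strings are hypothetical stand-ins for illustration, not Paddle's real types.

    #include <iostream>
    #include <type_traits>

    // Illustrative stand-ins for paddle::platform's device contexts
    // (hypothetical; not the real classes).
    struct CPUDeviceContext {};
    struct CUDADeviceContext {};

    // Alias mirroring enable_if_CPU from the patch: it names a type (void)
    // only when DeviceContext is the CPU context; otherwise substitution
    // fails and the specialization below is silently discarded.
    template <typename DeviceContext>
    using enable_if_CPU = typename std::enable_if<
        std::is_same<DeviceContext, CPUDeviceContext>::value>::type;

    // Primary template with the defaulted Enable parameter, as in softmax.h.
    template <typename DeviceContext, typename T, bool is_test,
              typename Enable = void>
    struct SoftmaxFunctor {
      void operator()() { std::cout << "generic path\n"; }
    };

    // Partial specialization that participates only when
    // enable_if_CPU<DeviceContext> is well-formed, i.e. only on CPU; a GPU
    // inference instantiation now selects the primary template instead.
    template <typename DeviceContext>
    struct SoftmaxFunctor<DeviceContext, float, true,
                          enable_if_CPU<DeviceContext>> {
      void operator()() { std::cout << "CPU inference fast path\n"; }
    };

    int main() {
      SoftmaxFunctor<CPUDeviceContext, float, true>()();   // CPU inference fast path
      SoftmaxFunctor<CUDADeviceContext, float, true>()();  // generic path
      SoftmaxFunctor<CPUDeviceContext, float, false>()();  // generic path
      return 0;
    }

Before this change the specialization was keyed only on <DeviceContext, float, true>, so it was also selected for GPU inference; the added Enable parameter restricts the fast path to CPU, which is what the commit title describes.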