diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h
index bf698dc2f753f0002557af07ad7ea976c85edada..089458e957dfaac1cbc3bf1bc2b4be4877e702c9 100644
--- a/paddle/fluid/operators/math/softmax.h
+++ b/paddle/fluid/operators/math/softmax.h
@@ -19,7 +19,8 @@
 namespace paddle {
 namespace operators {
 namespace math {
-template <typename DeviceContext, typename T, bool is_test>
+template <typename DeviceContext, typename T, bool is_test,
+          typename Enable = void>
 class SoftmaxFunctor {
  public:
   void operator()(const DeviceContext& context, const framework::Tensor* X,
diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h
index e09a24334762388733ba9941ad1674fbc401f707..0f3e5b20086378da8ef1138a5f5c005b724f7fa2 100644
--- a/paddle/fluid/operators/math/softmax_impl.h
+++ b/paddle/fluid/operators/math/softmax_impl.h
@@ -33,8 +33,8 @@ struct ValueClip {
   }
 };
 
-template <typename DeviceContext, typename T, bool is_test>
-void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
+template <typename DeviceContext, typename T, bool is_test, typename Enable>
+void SoftmaxFunctor<DeviceContext, T, is_test, Enable>::operator()(
     const DeviceContext& context, const framework::Tensor* X,
     framework::Tensor* Y) {
   auto logits = EigenMatrix<T>::From(*X);
@@ -66,8 +66,12 @@ void SoftmaxFunctor<DeviceContext, T, is_test>::operator()(
                                                  .broadcast(one_by_class));
 }
 
+template <class DeviceContext>
+using enable_if_CPU = typename std::enable_if<
+    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type;
+
 template <typename DeviceContext>
-class SoftmaxFunctor<DeviceContext, float, true> {
+class SoftmaxFunctor<DeviceContext, float, true, enable_if_CPU<DeviceContext>> {
   void operator()(const DeviceContext& context, const framework::Tensor* X,
                   framework::Tensor* Y) {
     auto in_dims = X->dims();
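
Note on the pattern used above: the trailing "typename Enable = void" parameter added to the primary SoftmaxFunctor template, combined with std::enable_if, lets the CPU-only partial specialization be selected only when the device context is platform::CPUDeviceContext; any other context falls back to the generic Eigen implementation. The following is a minimal, self-contained sketch of that SFINAE technique, assuming made-up names (Functor, CPUContext, GPUContext) rather than Paddle's actual types:

// Minimal sketch of the enable_if-gated partial specialization used in the
// diff above. Functor, CPUContext and GPUContext are hypothetical stand-ins
// for SoftmaxFunctor and the Paddle device contexts.
#include <iostream>
#include <type_traits>

struct CPUContext {};
struct GPUContext {};

// Alias that is void for CPUContext and ill-formed for any other context.
template <class DeviceContext>
using enable_if_CPU = typename std::enable_if<
    std::is_same<DeviceContext, CPUContext>::value>::type;

// Primary template with a trailing Enable parameter defaulted to void.
template <typename DeviceContext, typename T, typename Enable = void>
struct Functor {
  void operator()() const { std::cout << "generic path\n"; }
};

// Partial specialization chosen only when enable_if_CPU<DeviceContext>
// substitutes successfully, i.e. when DeviceContext is CPUContext.
template <typename DeviceContext, typename T>
struct Functor<DeviceContext, T, enable_if_CPU<DeviceContext>> {
  void operator()() const { std::cout << "CPU-optimized path\n"; }
};

int main() {
  Functor<CPUContext, float>()();  // prints "CPU-optimized path"
  Functor<GPUContext, float>()();  // prints "generic path"
  return 0;
}

This is also why the primary template in softmax.h gains the defaulted Enable parameter: without that extra slot, the partial specialization naming enable_if_CPU<DeviceContext> would have nothing to match against.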