Commit 6ac47a3d authored by T tensor-tang

rename Mkldnn to MKLDNN

Parent 32ec75ac
@@ -222,35 +222,35 @@ struct MKLDNNActivationGradFunc : public BaseActivationFunctor<T> {
 };
 
 template <typename T>
-using ReluMkldnnFunctor =
+using ReluMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_relu>;
 
 template <typename T>
-using TanhMkldnnFunctor =
+using TanhMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_tanh>;
 
 template <typename T>
-using SqrtMkldnnFunctor =
+using SqrtMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_sqrt>;
 
 template <typename T>
-using AbsMkldnnFunctor =
+using AbsMKLDNNFunctor =
    MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_abs>;
 
 template <typename T>
-using ReluMkldnnGradFunctor =
+using ReluMKLDNNGradFunctor =
    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_relu>;
 
 template <typename T>
-using TanhMkldnnGradFunctor =
+using TanhMKLDNNGradFunctor =
    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_tanh>;
 
 template <typename T>
-using SqrtMkldnnGradFunctor =
+using SqrtMKLDNNGradFunctor =
    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_sqrt>;
 
 template <typename T>
-using AbsMkldnnGradFunctor =
+using AbsMKLDNNGradFunctor =
    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_abs>;
 }  // namespace operators
 }  // namespace paddle
@@ -265,9 +265,9 @@ namespace ops = paddle::operators;
       ops::MKLDNNActivationGradKernel<ops::grad_functor<float>>);
 
 #define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro)            \
-  __macro(relu, ReluMkldnnFunctor, ReluMkldnnGradFunctor); \
-  __macro(tanh, TanhMkldnnFunctor, TanhMkldnnGradFunctor); \
-  __macro(sqrt, SqrtMkldnnFunctor, SqrtMkldnnGradFunctor); \
-  __macro(abs, AbsMkldnnFunctor, AbsMkldnnGradFunctor);
+  __macro(relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
+  __macro(tanh, TanhMKLDNNFunctor, TanhMKLDNNGradFunctor); \
+  __macro(sqrt, SqrtMKLDNNFunctor, SqrtMKLDNNGradFunctor); \
+  __macro(abs, AbsMKLDNNFunctor, AbsMKLDNNGradFunctor);
 
 FOR_EACH_MKLDNN_KERNEL_FUNCTOR(REGISTER_ACTIVATION_MKLDNN_KERNEL);
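
Note: the diff only renames the functor aliases; the macro-based registration wiring is untouched. For readers unfamiliar with this style, below is a minimal, self-contained sketch of the same X-macro pattern. The names FOR_EACH_KERNEL and REGISTER_KERNEL are hypothetical stand-ins for illustration, not Paddle's actual registration machinery.

#include <iostream>

// Hypothetical list macro enumerating (name, functor) pairs.
// Applying it to a per-entry macro stamps out one statement per pair.
#define FOR_EACH_KERNEL(__macro)      \
  __macro(relu, ReluMKLDNNFunctor);   \
  __macro(tanh, TanhMKLDNNFunctor)

// Hypothetical per-entry macro; stringizes its arguments and prints them.
#define REGISTER_KERNEL(name, functor) \
  std::cout << "register " #name " -> " #functor "\n"

int main() {
  // Expands to one registration statement per list entry.
  FOR_EACH_KERNEL(REGISTER_KERNEL);
  return 0;
}

Because the kernel list lives in one place, a rename like this commit's only has to touch the alias definitions and the list macro entries, which is exactly the shape of the diff above.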