Unverified commit abee2210, authored by HongyuJia, committed by GitHub

change mkldnn kernel layout, ALL_LAYOUT->ONEDNN (#46629)

Parent: b7b231a6
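
The change is one mechanical pattern applied across kernels and tests: every oneDNN phi kernel keeps OneDNN as its backend tag but switches its layout tag from ALL_LAYOUT to ONEDNN, and every PD_DECLARE_KERNEL site in the tests is updated to match. A minimal before/after sketch of the pattern, using the relu_grad registration and relu declaration that appear in the hunks below:

// Before this commit: the oneDNN kernel is registered under the generic
// ALL_LAYOUT layout tag, and tests declare it with the same tag.
PD_REGISTER_KERNEL(relu_grad,
                   OneDNN,
                   ALL_LAYOUT,
                   phi::ReluGradKernel,
                   float,
                   phi::dtype::bfloat16) {}
PD_DECLARE_KERNEL(relu, OneDNN, ALL_LAYOUT);

// After this commit: the layout tag is ONEDNN; a declaration must use the
// same (backend, layout) pair as the registration to refer to the same kernel.
PD_REGISTER_KERNEL(relu_grad,
                   OneDNN,
                   ONEDNN,
                   phi::ReluGradKernel,
                   float,
                   phi::dtype::bfloat16) {}
PD_DECLARE_KERNEL(relu, OneDNN, ONEDNN);
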
@@ -38,7 +38,7 @@ USE_OP_DEVICE_KERNEL(conv2d_transpose, MKLDNN);
 USE_OP_ITSELF(elementwise_add);
 USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN);
 USE_OP_ITSELF(gelu);
-PD_DECLARE_KERNEL(gelu, OneDNN, ALL_LAYOUT);
+PD_DECLARE_KERNEL(gelu, OneDNN, ONEDNN);
 PD_DECLARE_ARG_MAPPING_FN(gelu);
 namespace paddle {
...
@@ -27,11 +27,11 @@ USE_OP_DEVICE_KERNEL(softmax, MKLDNN);
 USE_OP_ITSELF(elementwise_add);
 USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN);
 USE_OP_ITSELF(leaky_relu);
-PD_DECLARE_KERNEL(leaky_relu, OneDNN, ALL_LAYOUT);
+PD_DECLARE_KERNEL(leaky_relu, OneDNN, ONEDNN);
 USE_OP_ITSELF(gelu);
 USE_OP_ITSELF(relu);
 USE_OP_ITSELF(tanh);
-PD_DECLARE_KERNEL(tanh, OneDNN, ALL_LAYOUT);
+PD_DECLARE_KERNEL(tanh, OneDNN, ONEDNN);
 PD_DECLARE_ARG_MAPPING_FN(gelu);
 namespace paddle {
...
@@ -259,5 +259,5 @@ TEST(test_prepare_op, test_prepare_data_cpu_mkldnn) {
 USE_OP_ITSELF(split);
 USE_OP_ITSELF(relu);
 #ifdef PADDLE_WITH_MKLDNN
-PD_DECLARE_KERNEL(relu, OneDNN, ALL_LAYOUT);
+PD_DECLARE_KERNEL(relu, OneDNN, ONEDNN);
 #endif
@@ -32,7 +32,7 @@ USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN);
 USE_OP_ITSELF(elementwise_mul);
 USE_OP_DEVICE_KERNEL(elementwise_mul, MKLDNN);
 USE_OP_ITSELF(relu);
-PD_DECLARE_KERNEL(relu, OneDNN, ALL_LAYOUT);
+PD_DECLARE_KERNEL(relu, OneDNN, ONEDNN);
 USE_OP_ITSELF(softmax);
 USE_OP_DEVICE_KERNEL(softmax, MKLDNN);
 USE_OP_ITSELF(conv2d);
...
@@ -30,7 +30,7 @@
 USE_OP_ITSELF(elementwise_add);
 USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN);
 USE_OP_ITSELF(relu);
-PD_DECLARE_KERNEL(relu, OneDNN, ALL_LAYOUT);
+PD_DECLARE_KERNEL(relu, OneDNN, ONEDNN);
 USE_OP_ITSELF(softmax);
 USE_OP_DEVICE_KERNEL(softmax, MKLDNN);
...
@@ -30,11 +30,11 @@
 USE_OP_ITSELF(pool2d);
 USE_OP_DEVICE_KERNEL(pool2d, MKLDNN);
 USE_OP_ITSELF(relu);
-PD_DECLARE_KERNEL(relu, OneDNN, ALL_LAYOUT);
+PD_DECLARE_KERNEL(relu, OneDNN, ONEDNN);
 USE_OP_ITSELF(transpose);
 USE_OP_DEVICE_KERNEL(transpose, MKLDNN);
 USE_OP_ITSELF(shape);
-PD_DECLARE_KERNEL(shape, OneDNN, ALL_LAYOUT);
+PD_DECLARE_KERNEL(shape, OneDNN, ONEDNN);
 USE_OP_ITSELF(crop);
 USE_OP_DEVICE_KERNEL(crop, CPU);
...
@@ -260,14 +260,14 @@ void Relu6GradKernel(const Context& dev_ctx,
 PD_REGISTER_KERNEL(relu_grad,
                    OneDNN,
-                   ALL_LAYOUT,
+                   ONEDNN,
                    phi::ReluGradKernel,
                    float,
                    phi::dtype::bfloat16) {}
 #define PD_REGISTER_ACTIVATION_GRAD_KERNEL(name, func) \
   PD_REGISTER_KERNEL(                                  \
-      name, OneDNN, ALL_LAYOUT, phi::func, float, phi::dtype::bfloat16) {}
+      name, OneDNN, ONEDNN, phi::func, float, phi::dtype::bfloat16) {}
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(abs_grad, AbsGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(elu_grad, EluGradKernel)
...
@@ -192,11 +192,11 @@ void Relu6Kernel(const Context& dev_ctx,
 }  // namespace phi
-PD_REGISTER_KERNEL(round, OneDNN, ALL_LAYOUT, phi::RoundKernel, float) {}
+PD_REGISTER_KERNEL(round, OneDNN, ONEDNN, phi::RoundKernel, float) {}
 #define PD_REGISTER_ACTIVATION_KERNEL(name, func) \
   PD_REGISTER_KERNEL(                             \
-      name, OneDNN, ALL_LAYOUT, phi::func, float, phi::dtype::bfloat16) {}
+      name, OneDNN, ONEDNN, phi::func, float, phi::dtype::bfloat16) {}
 PD_REGISTER_ACTIVATION_KERNEL(abs, AbsKernel)
 PD_REGISTER_ACTIVATION_KERNEL(elu, EluKernel)
...
@@ -51,7 +51,7 @@ void ShapeKernel(const Context& dev_ctx,
 PD_REGISTER_KERNEL(shape,
                    OneDNN,
-                   ALL_LAYOUT,
+                   ONEDNN,
                    phi::ShapeKernel,
                    float,
                    phi::dtype::bfloat16,
...