From dabfbba9627fe526b86089f62689ccb7f037272a Mon Sep 17 00:00:00 2001
From: RedContritio
Date: Tue, 30 May 2023 10:34:20 +0800
Subject: [PATCH] support auto generate for activation op relu6 (#53979)

* support auto generate for activation_op relu6

* add generated_static_op for activation_op in CMakeLists.txt
---
 paddle/fluid/framework/ir/CMakeLists.txt |  1 +
 .../framework/paddle2cinn/CMakeLists.txt |  3 ++-
 paddle/fluid/operators/activation_op.cc  | 22 -------------------
 paddle/phi/api/yaml/op_compat.yaml       |  4 ++++
 paddle/phi/api/yaml/static_backward.yaml | 11 ++++++++++
 paddle/phi/api/yaml/static_ops.yaml      | 10 +++++++++
 paddle/phi/ops/compat/activation_sig.cc  | 12 ----------
 test/cpp/fluid/mkldnn/CMakeLists.txt     |  1 +
 test/cpp/jit/CMakeLists.txt              |  1 +
 test/cpp/new_executor/CMakeLists.txt     |  1 +
 10 files changed, 31 insertions(+), 35 deletions(-)

diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index a18607595e1..d63af261935 100755
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -460,6 +460,7 @@ if(WITH_MKLDNN)
       conv_transpose_op
       batch_norm_op
       generated_op
+      generated_static_op
       activation_op
       elementwise_add_op
       concat_and_split
diff --git a/paddle/fluid/framework/paddle2cinn/CMakeLists.txt b/paddle/fluid/framework/paddle2cinn/CMakeLists.txt
index a415c7d5832..0c028857ce3 100644
--- a/paddle/fluid/framework/paddle2cinn/CMakeLists.txt
+++ b/paddle/fluid/framework/paddle2cinn/CMakeLists.txt
@@ -60,7 +60,8 @@ if(WITH_TESTING)
     mul_op
     activation_op
     elementwise_add_op
-    generated_op)
+    generated_op
+    generated_static_op)
 
   set_tests_properties(build_cinn_pass_test PROPERTIES LABELS "RUN_TYPE=CINN")
   target_link_libraries(build_cinn_pass_test ${PYTHON_LIBRARIES})
diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc
index 0edcb3726d0..375665bc8ee 100644
--- a/paddle/fluid/operators/activation_op.cc
+++ b/paddle/fluid/operators/activation_op.cc
@@ -176,27 +176,6 @@ $$out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$$
   }
 };
 
-class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X",
-             "Input of relu6 operator, an N-D Tensor, "
-             "with data type float32, float64.");
-    AddOutput(
-        "Out",
-        "Output of relu6 operator, a Tensor with the same shape as input.");
-    AddAttr<float>("threshold",
-                   "The threshold value of Relu6. Default is 6.0. ")
-        .SetDefault(6.0f);
-    AddComment(R"DOC(
-Relu6 Activation Operator.
-
-$$out = \min(\max(0, x), threshold)$$
-
-)DOC");
-  }
-};
-
 class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
@@ -452,7 +431,6 @@ FOR_EACH_ACTIVATION_OP(REGISTER_ACTIVATION_OP);
 
 REGISTER_ACTIVATION_CPU_KERNEL(soft_relu, SoftRelu)
 
-REGISTER_ACTIVATION_OP(relu6, Relu6, Relu6Functor, Relu6GradFunctor);
 REGISTER_ACTIVATION_OP(mish, Mish, MishFunctor, MishGradFunctor);
 REGISTER_ACTIVATION_OP_WITH_COMP(hard_swish,
                                  HardSwish,
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index bbe3017e27e..1cfeac6e0fc 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -1902,6 +1902,10 @@
 
 - op : relu6
   backward : relu6_grad
+  inputs :
+    x : X
+  outputs :
+    out : Out
   extra :
     attrs : [bool use_mkldnn = false]
 
diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml
index 27b2589ce9b..a512342311c 100755
--- a/paddle/phi/api/yaml/static_backward.yaml
+++ b/paddle/phi/api/yaml/static_backward.yaml
@@ -43,6 +43,17 @@
     func : frobenius_norm_grad
     param : [x, out, out_grad, axis, keepdim, reduce_all]
 
+- backward_op : relu6_grad
+  forward : relu6 (Tensor x, float threshold = 6.0f) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out]
+  kernel :
+    func : relu6_grad
+  inplace : (out_grad -> x_grad)
+
 - backward_op : rnn_grad
   forward : rnn (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false) -> Tensor(out), Tensor(dropout_state_out), Tensor[](state), Tensor(reserve)
   args : (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor out, Tensor dropout_state_out, Tensor reserve, Tensor out_grad, Tensor[] state_grad, float dropout_prob, bool is_bidirec, int input_size, int hidden_size, int num_layers, str mode, int seed, bool is_test)
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index 095bb89836e..bcdf38e85fe 100755
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -313,6 +313,16 @@
     func : reduce_scatter
     param: [x, nranks]
 
+- op : relu6
+  args : (Tensor x, float threshold = 6.0f)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : relu6_raw
+  backward : relu6_grad
+
 - op : rnn
   args: (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false)
   output: Tensor(out), Tensor(dropout_state_out), Tensor[](state){pre_state.size()}, Tensor(reserve)
diff --git a/paddle/phi/ops/compat/activation_sig.cc b/paddle/phi/ops/compat/activation_sig.cc
index 1bd409113ed..2d2ea6fdfdb 100644
--- a/paddle/phi/ops/compat/activation_sig.cc
+++ b/paddle/phi/ops/compat/activation_sig.cc
@@ -47,11 +47,6 @@ KernelSignature SwishGradOpArgumentMapping(
   return KernelSignature("swish_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"});
 }
 
-KernelSignature Relu6GradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("relu6_grad", {"Out", "Out@GRAD"}, {}, {"X@GRAD"});
-}
-
 KernelSignature HardSwishGradOpArgumentMapping(
     const ArgumentMappingContext& ctx UNUSED) {
   return KernelSignature("hardswish_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"});
@@ -67,11 +62,6 @@ KernelSignature SwishOpArgumentMapping(
   return KernelSignature("swish_raw", {"X"}, {"beta"}, {"Out"});
 }
 
-KernelSignature Relu6OpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("relu6_raw", {"X"}, {"threshold"}, {"Out"});
-}
-
 }  // namespace phi
 
 PD_REGISTER_BASE_KERNEL_NAME(hard_swish, hardswish);
@@ -79,8 +69,6 @@ PD_REGISTER_BASE_KERNEL_NAME(hard_swish_grad, hardswish_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(mish_grad, phi::MishGradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(relu6_grad, phi::Relu6GradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(relu6, phi::Relu6OpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(hard_swish_grad,
                            phi::HardSwishGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(hard_swish, phi::HardSwishOpArgumentMapping);
diff --git a/test/cpp/fluid/mkldnn/CMakeLists.txt b/test/cpp/fluid/mkldnn/CMakeLists.txt
index d08e30b346a..0784ad54a78 100644
--- a/test/cpp/fluid/mkldnn/CMakeLists.txt
+++ b/test/cpp/fluid/mkldnn/CMakeLists.txt
@@ -42,6 +42,7 @@ cc_test_old(
   crop_op
   activation_op
   generated_op
+  generated_static_op
   phi
   transpose_op
   fused_transpose_op
diff --git a/test/cpp/jit/CMakeLists.txt b/test/cpp/jit/CMakeLists.txt
index 708f48bbf49..558d7386090 100644
--- a/test/cpp/jit/CMakeLists.txt
+++ b/test/cpp/jit/CMakeLists.txt
@@ -15,6 +15,7 @@ if(WITH_TESTING AND NOT WIN32)
       feed_op
       fetch_op
       generated_op
+      generated_static_op
       transfer_layout_op
       jit_layer)
   cc_test(
diff --git a/test/cpp/new_executor/CMakeLists.txt b/test/cpp/new_executor/CMakeLists.txt
index 30af210725c..710b6e63eaa 100644
--- a/test/cpp/new_executor/CMakeLists.txt
+++ b/test/cpp/new_executor/CMakeLists.txt
@@ -31,6 +31,7 @@ if(WITH_GPU
       elementwise_max_op
       elementwise_div_op
       generated_op
+      generated_static_op
       squared_l2_norm_op
       memcpy_h2d_op
       memcpy_d2h_op
-- 
GitLab
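
For reference, relu6 computes out = min(max(0, x), threshold), with threshold defaulting to 6.0f as in the YAML args above, and its gradient can be recovered from the forward output alone: dx = dout wherever 0 < out < threshold, else 0. That is why the relu6_grad entry in static_backward.yaml declares args : (Tensor out, Tensor out_grad) with no dependence on x, and why inplace : (out_grad -> x_grad) is safe. Below is a minimal standalone C++ sketch of these semantics only; it is not Paddle's kernel code, and the helper names Relu6/Relu6Grad are hypothetical.

#include <algorithm>
#include <cstdio>
#include <vector>

// Forward: out = min(max(0, x), threshold), per the formula in the
// removed Relu6OpMaker comment.
float Relu6(float x, float threshold = 6.0f) {
  return std::min(std::max(0.0f, x), threshold);
}

// Backward: dx = dout where 0 < out < threshold, else 0. Only `out` and
// `dout` are read, mirroring args : (Tensor out, Tensor out_grad); the
// result overwrites `dout` in place, mirroring inplace : (out_grad -> x_grad).
void Relu6Grad(const std::vector<float>& out,
               std::vector<float>* dout,
               float threshold = 6.0f) {
  for (size_t i = 0; i < out.size(); ++i) {
    const bool active = out[i] > 0.0f && out[i] < threshold;
    if (!active) (*dout)[i] = 0.0f;
  }
}

int main() {
  const std::vector<float> x = {-1.0f, 3.0f, 7.0f};
  std::vector<float> out, dout(x.size(), 1.0f);
  for (float v : x) out.push_back(Relu6(v));
  Relu6Grad(out, &dout);
  for (size_t i = 0; i < x.size(); ++i) {
    // Prints: x=-1 out=0 dx=0, x=3 out=3 dx=1, x=7 out=6 dx=0
    std::printf("x=%g out=%g dx=%g\n", x[i], out[i], dout[i]);
  }
  return 0;
}

The mask uses strict inequalities, so this sketch assumes the gradient is taken as zero at the clip boundaries (out == 0 or out == threshold), the usual convention for clipped activations.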