diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.h b/paddle/fluid/operators/controlflow/conditional_block_op.h
index d85eca8f5cb3a2dae2f29d9b0f909ccc248de751..f2407e9a3f05ac4205b4ecfbafa2fa72eade5d55 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_op.h
+++ b/paddle/fluid/operators/controlflow/conditional_block_op.h
@@ -119,11 +119,6 @@ class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker {
                   "The conditional variable (Cond) is used as scalar "
                   "condition.")
         .SetDefault(false);
-    AddAttr<std::vector<std::string>>(ConditionalOp::kSkipEagerDeletionVars,
-                                      "Vars that would not be deleted when "
-                                      "garbage collection strategy enables")
-        .SetDefault(std::vector<std::string>())
-        .AsExtra();
     AddComment(R"DOC(Conditional block operator
 
 If `is_scalar_condition` is True, the conditional variable (Cond) is a scalar,
diff --git a/paddle/fluid/operators/controlflow/while_op.cc b/paddle/fluid/operators/controlflow/while_op.cc
index 4e0344b3b939165baee3aef1ab2c36f37b356bf8..10fa24b1bd4f5be963c63a22c9b938a4eeb8e04a 100644
--- a/paddle/fluid/operators/controlflow/while_op.cc
+++ b/paddle/fluid/operators/controlflow/while_op.cc
@@ -221,11 +221,6 @@ class WhileOpMaker : public framework::OpProtoAndCheckerMaker {
                   "(bool, default false) Set to true for inference only, false "
                   "for training. Some layers may run faster when this is true.")
         .SetDefault(false);
-    AddAttr<std::vector<std::string>>(kSkipEagerDeletionVars,
-                                      "Vars that would skip eager deletion."
-                                      "Users should not set this manually.")
-        .SetDefault(std::vector<std::string>())
-        .AsExtra();
     AddComment(R"DOC(
 )DOC");
   }
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index e01e6b78633d014d9ea00f9c6f4cab63138d6e45..7720bb5d2e2f4e503745375fe72c6fdc93ccc79c 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -93,6 +93,11 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]
 
+- op : conditional_block
+  backward : conditional_block_grad
+  extra :
+    attrs : ['str[] skip_eager_deletion_vars = {}']
+
 - op : conv2d
   backward : conv2d_grad
   extra :
@@ -249,6 +254,11 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
+- op : expand (expand_v2)
+  backward : expand_grad (expand_v2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
+
 - op : expm1
   backward : expm1_grad
   extra :
@@ -293,6 +303,15 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
+- op : full (fill_constant)
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- op : gather
+  backward : gather_grad
+  extra :
+    attrs : [bool overwrite = true]
+
 - op : gelu
   backward : gelu_grad
   extra :
@@ -392,6 +411,12 @@
              str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
              'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']
 
+- op : matmul_with_flatten (mul)
+  backward : matmul_with_flatten_grad (mul_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
+             float scale_out = 1.0f, bool force_fp32_output = false]
+
 - op : maximum (elementwise_max)
   backward : maximum_grad (elementwise_max_grad)
   extra :
@@ -447,6 +472,17 @@
   outputs :
     out : Out
 
+- op : pool2d
+  backward : pool2d_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
+             str mkldnn_data_type = "float32", bool is_test = false]
+
+- op : pool3d
+  backward : pool3d_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - op : prelu
   backward : prelu_grad
   extra :
@@ -619,6 +655,11 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
+- op : stack
+  backward : stack_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
 - op : subtract (elementwise_sub)
   backward : subtract_grad (elementwise_sub_grad)
   extra :
@@ -656,6 +697,12 @@
   outputs :
     out : Out
 
+- op : transpose (transpose2)
+  backward : transpose_grad (transpose2_grad)
+  extra :
+    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
+             str mkldnn_data_type = "float32"]
+
 - op : trilinear_interp (trilinear_interp_v2)
   backward : trilinear_interp_grad (trilinear_interp_v2_grad)
   extra :
@@ -667,45 +714,7 @@
   outputs :
     out : Out
 
-- op : expand (expand_v2)
-  backward : expand_grad (expand_v2_grad)
-  extra :
-    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
-
-- op : full (fill_constant)
-  extra :
-    attrs : [bool use_mkldnn = false]
-
-- op : gather
-  backward : gather_grad
-  extra :
-    attrs : [bool overwrite = true]
-
-- op : matmul_with_flatten (mul)
-  backward : matmul_with_flatten_grad (mul_grad)
-  extra :
-    attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
-             float scale_out = 1.0f, bool force_fp32_output = false]
-
-- op : pool2d
-  backward : pool2d_grad
+- op : while
+  backward : while_grad
   extra :
-    attrs : [bool use_mkldnn = false, bool use_quantizer = false,
-             str mkldnn_data_type = "float32", bool is_test = false]
-
-- op : pool3d
-  backward : pool3d_grad
-  extra :
-    attrs : [bool use_mkldnn = false]
-
-- op : stack
-  backward : stack_grad
-  extra :
-    attrs : [bool use_mkldnn = false]
-
-
-- op : transpose (transpose2)
-  backward : transpose_grad (transpose2_grad)
-  extra :
-    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
-             str mkldnn_data_type = "float32"]
+    attrs : ['str[] skip_eager_deletion_vars = {}']