diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index 3467658e894d547d7a0106d6414624b10351d6f3..bc5bd118dbec4791e79790d304e5aa8729304976 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -257,13 +257,16 @@ void BatchNormOpMaker::Make() {
   AddOutput("ReserveSpace",
             "Reserve GPU space for triggering the new semi-persistent "
             "NHWC kernel")
-      .AsDispensable();
+      .AsDispensable()
+      .AsExtra();
   AddAttr<bool>("use_mkldnn",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("fuse_with_relu",
                 "(bool, default false) Only used in mkldnn kernel")
-      .SetDefault(false);
+      .SetDefault(false)
+      .AsExtra();
   AddAttr<bool>("use_global_stats",
                 "(bool, default false) Whether to use global mean and "
                 "variance. In inference or test mode, set use_global_stats "
diff --git a/paddle/fluid/operators/instance_norm_op.cc b/paddle/fluid/operators/instance_norm_op.cc
index 0a850400686c4949f6cda83f0e386d3c51d323f9..cfdaacf8cb6ee7dc958769322c3c03b44b921662 100644
--- a/paddle/fluid/operators/instance_norm_op.cc
+++ b/paddle/fluid/operators/instance_norm_op.cc
@@ -149,11 +149,13 @@ void InstanceNormOpMaker::Make() {
   AddOutput("SavedMean",
             "Mean of the current mini batch, "
             "will apply to output when training")
-      .AsIntermediate();
+      .AsIntermediate()
+      .AsExtra();
   AddOutput("SavedVariance",
             "Variance of the current mini batch, "
             "will apply to output when training")
-      .AsIntermediate();
+      .AsIntermediate()
+      .AsExtra();
   AddComment(R"DOC(
 Instance Normalization.
 
diff --git a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
index eb241b9157fecd8615797e276ab53f9f6812e21d..bd910e30672a47bab1452d59cef458d80cfcf56d 100644
--- a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
@@ -47,7 +47,9 @@ class BatchNormMKLDNNHandler : public platform::MKLDNNHandlerNoCachingT<
                                     mkldnn::batch_normalization_backward>(
             mkldnn_engine, ctx.GetPlace()) {
     const float epsilon = ctx.Attr<float>("epsilon");
-    const bool fuse_with_relu = ctx.Attr<bool>("fuse_with_relu");
+    const bool fuse_with_relu = ctx.HasAttr("fuse_with_relu")
+                                    ? ctx.Attr<bool>("fuse_with_relu")
+                                    : false;
 
     std::vector<std::string> DataLayout_error_msg = {"kNHWC", "kNCHW",
                                                      "kAnyLayout", "kMKLDNN"};
diff --git a/paddle/fluid/operators/squeeze_op.cc b/paddle/fluid/operators/squeeze_op.cc
index e13245797cff3e348666c21b89954a8eb61a28c4..cf24faf25db70fe931e110be451196fa81120e98 100644
--- a/paddle/fluid/operators/squeeze_op.cc
+++ b/paddle/fluid/operators/squeeze_op.cc
@@ -225,7 +225,7 @@ class Squeeze2Op : public framework::OperatorWithKernel {
       ctx->ShareLoD("X", "Out");
     }
 
-    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Squeeze2");
+    if (!ctx->HasOutput("XShape")) return;
     std::vector<int64_t> xshape_dims(x_dims.size() + 1);
     xshape_dims[0] = 0;
@@ -323,7 +323,8 @@ class Squeeze2OpMaker : public SqueezeOpMaker {
     AddOutput("XShape",
               "XShape is just used to store the shape and lod of X, which will "
               "be used in SqueezeGradOp.")
-        .AsIntermediate();
+        .AsIntermediate()
+        .AsExtra();
   }
 };
 