diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index 2e48d109f20076bb6310b5596f855be5398cbb70..3677a68a7f59527655c5aa8c9b446d603d972d9a 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -303,6 +303,9 @@ void Conv3DOpMaker::Make() {
       "Defaults to \"NHWC\". Specify the data format of the output data, "
       "the input will be transformed automatically. ")
       .SetDefault("AnyLayout");
+  AddAttr<bool>("force_fp32_output",
+                "(bool, default false) Only used in mkldnn INT8 kernel")
+      .SetDefault(false);
   // TODO(dzhwinter): need to registered layout transform function
   AddAttr<int>("workspace_size_MB",
                "Only used in cudnn kernel. workspace size for cudnn, in MB, "
diff --git a/paddle/fluid/operators/quantize_op.cc b/paddle/fluid/operators/quantize_op.cc
index dc2d36c6b8c57b6e09c93ff2cfd859a6c337e2b3..3d2cd64fcdcc228dc9d7d1f357b7b41a62f65980 100644
--- a/paddle/fluid/operators/quantize_op.cc
+++ b/paddle/fluid/operators/quantize_op.cc
@@ -98,6 +98,9 @@ void QuantOpMaker::Make() {
   AddInput("Input", "input data");
   AddInput("Scale", "scale data");
   AddOutput("Output", "output data");
+  AddAttr<bool>("is_negative_input",
+                "(bool, default false) Only used in mkldnn INT8 kernel")
+      .SetDefault(false);
   AddComment(R"DOC(This op will quantize data from FP32 to INT8)DOC");
 }
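
For context, a rough sketch (not taken from this diff) of how MKL-DNN INT8 kernels might consume the two new attributes through `ExecutionContext::Attr<bool>()`. The surrounding kernel structure and the `ComputeINT8` helper are assumptions for illustration only:

```cpp
// Sketch only, not part of this PR: the attributes would typically be read
// inside the MKL-DNN kernels' Compute() methods.

// conv (INT8 path): decide whether the convolution result stays in FP32,
// e.g. for the last quantized conv before a non-quantized operator.
void Compute(const framework::ExecutionContext& ctx) const override {
  bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
  if (force_fp32_output) {
    ComputeINT8<float>(ctx);    // hypothetical helper: FP32 output
  } else {
    ComputeINT8<int8_t>(ctx);   // hypothetical helper: INT8 output
  }
}

// quantize: pick signed vs. unsigned INT8 for the destination memory.
void Compute(const framework::ExecutionContext& ctx) const override {
  bool is_negative = ctx.Attr<bool>("is_negative_input");
  auto dst_dt = is_negative ? mkldnn::memory::data_type::s8
                            : mkldnn::memory::data_type::u8;
  // ... build the FP32 -> INT8 reorder primitive using dst_dt
  //     and the "Scale" input ...
}
```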