Commit 02fe70ed authored by xiaolil1

remove fp32 attr set_ops, modify conv_op log

Parent d5167b48
@@ -768,7 +768,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     // the scale parameter. It is assumed that when fuse_residual_conn is true, the
     // Output tensor contains the data coming from residual connection. The
     // result of this post_op is: Output = scale * Output + Conv_Out.
-    conv_attr.set_output_scales(0, {1.0f});
     if (fuse_residual_conn) {
       post_operations.append_sum(1.0f);
     }
...
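
The comment in this hunk describes the residual-connection fusion: when fuse_residual_conn is true, the convolution result is accumulated into the Output tensor (which already holds the residual data) through an MKL-DNN sum post-op, giving Output = scale * Output + Conv_Out. Below is a minimal sketch of how such an attribute can be assembled with the pre-1.0 MKL-DNN C++ API this file is written against; CreateConvAttr and the fuse_relu branch are illustrative, not Paddle's actual helper.

```cpp
// Minimal sketch, assuming the pre-1.0 MKL-DNN C++ API.
// CreateConvAttr is a hypothetical helper, not Paddle's real function.
#include <mkldnn.hpp>

mkldnn::primitive_attr CreateConvAttr(bool fuse_residual_conn, bool fuse_relu) {
  mkldnn::primitive_attr conv_attr;
  mkldnn::post_ops post_operations;
  if (fuse_residual_conn) {
    // Accumulate the convolution result into the destination tensor, which
    // already holds the residual data: Output = 1.0f * Output + Conv_Out.
    post_operations.append_sum(1.0f);
  }
  if (fuse_relu) {
    // scale = 1.0f, alpha (negative slope) = 0.0f, beta unused.
    post_operations.append_eltwise(1.0f, mkldnn::algorithm::eltwise_relu, 0.0f,
                                   0.0f);
  }
  conv_attr.set_post_ops(post_operations);
  return conv_attr;
}
```

An attr built this way is then supplied when creating the convolution primitive descriptor, so the sum (and any eltwise) post-ops execute fused with the convolution rather than as separate kernels.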
@@ -129,7 +129,8 @@ void Conv2DOpMaker::Make() {
            "to the number of output channels. Only used with MKL-DNN.")
       .AsDispensable();
   AddInput("Scale_in",
-           "(Tensor) Scale_in to be used for int8 input data. Only used with INT8.")
+           "(Tensor) Scale_in to be used for int8 input data."
+           "Only used with INT8.")
       .AsDispensable();
   AddInput("Scale_in_eltwise",
            "(Tensor) Scale_in_eltwise to be used for int8 eltwise input data."
...
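
The split description in this hunk relies on C++ adjacent string literal concatenation: the two literals are merged at compile time into a single string passed to AddInput. A small stand-alone illustration of the mechanism (not Paddle code):

```cpp
// Illustration only: adjacent string literals concatenate at compile time.
#include <cassert>
#include <string>

int main() {
  const std::string desc =
      "(Tensor) Scale_in to be used for int8 input data."
      "Only used with INT8.";
  // As written in the hunk, there is no space between "data." and "Only",
  // so the merged string reads "...input data.Only used with INT8."
  assert(desc ==
         "(Tensor) Scale_in to be used for int8 input data.Only used with INT8.");
  return 0;
}
```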