diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc
index 16a32a3f6cfb12e5e0674219dc5e532d7875199c..358f122c8359fa60f2c99492db8851c8a5fc5293 100644
--- a/paddle/fluid/operators/fake_quantize_op.cc
+++ b/paddle/fluid/operators/fake_quantize_op.cc
@@ -447,8 +447,6 @@ class MovingAverageAbsMaxScaleOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
                    "MovingAverageAbsMaxScale");
-    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
-                   "MovingAverageAbsMaxScale");
     OP_INOUT_CHECK(ctx->HasOutput("OutScale"), "Output", "OutScale",
                    "MovingAverageAbsMaxScale");
     if (ctx->HasOutput("OutState")) {
@@ -457,9 +455,7 @@ class MovingAverageAbsMaxScaleOp : public framework::OperatorWithKernel {
     if (ctx->HasOutput("OutAccum")) {
       ctx->SetOutputDim("OutAccum", {1});
     }
-    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->SetOutputDim("OutScale", {1});
-    ctx->ShareLoD("X", /*->*/ "Out");
   }
 
  protected:
@@ -477,8 +473,6 @@ class MovingAverageAbsMaxScaleOpMaker
     AddInput("X", "(Tensor) Input is float data type.");
     AddInput("InAccum", "Last accum.").AsDispensable();
     AddInput("InState", "Last state.").AsDispensable();
-    AddOutput("Out",
-              "(Tensor) Output tensor is just equivalent to the input tensor.");
     AddOutput("OutScale", " Current scale");
     AddOutput("OutState", "(Tensor) state buffer.").AsDispensable();
     AddOutput("OutAccum", "(Tensor) accum buffer.").AsDispensable();
diff --git a/paddle/fluid/operators/fake_quantize_op.h b/paddle/fluid/operators/fake_quantize_op.h
index fa5048852e7532d36c712b31109243bcce8abd33..4136217fb0c5f600971c1c04f803b65de9bbecb4 100644
--- a/paddle/fluid/operators/fake_quantize_op.h
+++ b/paddle/fluid/operators/fake_quantize_op.h
@@ -277,10 +277,7 @@ class MovingAverageAbsMaxScaleKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in = context.Input<framework::Tensor>("X");
-    auto* out = context.Output<framework::Tensor>("Out");
-    out->mutable_data<T>(context.GetPlace());
     auto& dev_ctx = context.template device_context<DeviceContext>();
-    framework::TensorCopy(*in, context.GetPlace(), dev_ctx, out);
 
     bool is_test = context.Attr<bool>("is_test");
     // testing
diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
index c36cd1f74e6682050d230a176b815f1388619afd..51add2449fdebc57d0f8a95b238d7563b906db29 100644
--- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
+++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
@@ -1449,7 +1449,6 @@ class OutScaleForTrainingPass(object):
         for op in target_ops:
             for output_var_name in _get_op_output_var_names(op):
                 in_node = graph._find_node_by_name(op.outputs, output_var_name)
-                out_node = graph.create_var_node_from_desc(in_node.var())
                 scale_node = graph.create_persistable_node(
                     name=self._scale_name(in_node.name()),
                     var_type=core.VarDesc.VarType.LOD_TENSOR,
@@ -1464,7 +1463,7 @@
                     self._scope,
                     self._place)
                 ins = {'X': in_node}
-                outs = {'Out': out_node, 'OutScale': scale_node}
+                outs = {'OutScale': scale_node}
                 if not self._is_test:
                     state_in_node = graph.create_persistable_node(
                         name=unique_name.generate('scale_state@'),
@@ -1509,7 +1508,6 @@
                     inputs=ins,
                     outputs=outs)
                 graph.link_to(in_node, scale_op_node)
-                graph.link_to(scale_op_node, out_node)
                 graph.link_to(scale_op_node, scale_node)
                 if not self._is_test:
                     graph.link_to(state_in_node, scale_op_node)
diff --git a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
index 4314faaf397a2a53a65368ef6625952bc22c9616..1c8335e3bceab24cba9364a96f6907d2cf585fe0 100644
--- a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
@@ -146,7 +146,6 @@ class TestMovingAverageAbsMaxScaleOp(OpTest):
         out_state[0] = self.attrs['moving_rate'] * state[0] + 1
         out_scale = out_accum / out_state
         self.outputs = {
-            'Out': self.inputs['X'],
             'OutAccum': out_accum,
             'OutState': out_state,
             'OutScale': out_scale,
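
Note for reviewers: with the pass-through 'Out' removed, moving_average_abs_max_scale only maintains the scale statistics, and downstream nodes keep consuming the op's input X directly (hence the dropped graph.link_to(scale_op_node, out_node) edge above). Below is a minimal NumPy sketch of the reference semantics the updated unit test checks; the helper name is hypothetical, and the out_accum update is assumed from the surrounding moving-average abs-max logic rather than shown in this patch.

    import numpy as np

    def moving_average_abs_max_scale(x, accum, state, moving_rate=0.9):
        # accum tracks a decayed running sum of per-batch abs-max values;
        # state tracks the decayed step count; their ratio is the scale.
        out_accum = moving_rate * accum + np.max(np.abs(x))
        out_state = moving_rate * state + 1.0
        out_scale = out_accum / out_state
        # No 'Out' pass-through any more: callers keep using x itself.
        return out_accum, out_state, out_scale

    x = np.random.uniform(-1, 1, (8, 16)).astype('float32')
    accum, state, scale = moving_average_abs_max_scale(x, np.ones(1), np.ones(1))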