Unverified · Commit 42189be6 authored by cc, committed by GitHub

[Quant] Remove the output for moving_average_abs_max_scale op (#25697)

* Remove the output for moving_average_abs_max_scale op, test=develop
Parent: 521e7015
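For context, moving_average_abs_max_scale tracks a moving average of the input tensor's absolute maximum; the OutScaleForTrainingPass below uses that statistic to record quantization scales for activations. A minimal NumPy sketch of the update rule, assuming the accumulator follows the same moving-rate form as the state update shown in the unit test at the end of this diff:

    import numpy as np

    def moving_average_abs_max_scale(x, accum, state, moving_rate=0.9):
        # state is a decayed step counter; accum is the decayed running
        # sum of per-step abs-max values. Their ratio is a bias-corrected
        # moving average of max(|x|).
        state = moving_rate * state + 1
        accum = moving_rate * accum + np.max(np.abs(x))
        scale = accum / state
        return scale, accum, state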
@@ -447,8 +447,6 @@ class MovingAverageAbsMaxScaleOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
                    "MovingAverageAbsMaxScale");
-    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
-                   "MovingAverageAbsMaxScale");
     OP_INOUT_CHECK(ctx->HasOutput("OutScale"), "Output", "OutScale",
                    "MovingAverageAbsMaxScale");
     if (ctx->HasOutput("OutState")) {
@@ -457,9 +455,7 @@ class MovingAverageAbsMaxScaleOp : public framework::OperatorWithKernel {
     if (ctx->HasOutput("OutAccum")) {
       ctx->SetOutputDim("OutAccum", {1});
     }
-    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->SetOutputDim("OutScale", {1});
-    ctx->ShareLoD("X", /*->*/ "Out");
   }

 protected:
@@ -477,8 +473,6 @@ class MovingAverageAbsMaxScaleOpMaker
     AddInput("X", "(Tensor) Input is float data type.");
     AddInput("InAccum", "Last accum.").AsDispensable();
     AddInput("InState", "Last state.").AsDispensable();
-    AddOutput("Out",
-              "(Tensor) Output tensor is just equivalent to the input tensor.");
     AddOutput("OutScale", " Current scale");
     AddOutput("OutState", "(Tensor) state buffer.").AsDispensable();
     AddOutput("OutAccum", "(Tensor) accum buffer.").AsDispensable();
......
@@ -277,10 +277,7 @@ class MovingAverageAbsMaxScaleKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in = context.Input<framework::Tensor>("X");
-    auto* out = context.Output<framework::Tensor>("Out");
-    out->mutable_data<T>(context.GetPlace());
     auto& dev_ctx = context.template device_context<DeviceContext>();
-    framework::TensorCopy(*in, context.GetPlace(), dev_ctx, out);
     bool is_test = context.Attr<bool>("is_test");
     // testing
......
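The kernel change above removes the TensorCopy from X to Out, so the op no longer materializes a second copy of every activation it watches; it becomes a pure observer. A sketch of that shape, under the same assumed update rule (ScaleStats and observe are illustrative names, not Paddle API):

    from dataclasses import dataclass
    import numpy as np

    @dataclass
    class ScaleStats:
        accum: float = 0.0
        state: float = 0.0
        scale: float = 0.0

    def observe(x, stats, moving_rate=0.9):
        # Update the running abs-max statistics as a side effect;
        # do not copy or transform x.
        stats.state = moving_rate * stats.state + 1
        stats.accum = moving_rate * stats.accum + float(np.max(np.abs(x)))
        stats.scale = stats.accum / stats.state
        return x  # downstream consumers keep reading X directly

This saves one full-tensor allocation and copy per monitored output during training.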
@@ -1449,7 +1449,6 @@ class OutScaleForTrainingPass(object):
         for op in target_ops:
             for output_var_name in _get_op_output_var_names(op):
                 in_node = graph._find_node_by_name(op.outputs, output_var_name)
-                out_node = graph.create_var_node_from_desc(in_node.var())
                 scale_node = graph.create_persistable_node(
                     name=self._scale_name(in_node.name()),
                     var_type=core.VarDesc.VarType.LOD_TENSOR,
@@ -1464,7 +1463,7 @@ class OutScaleForTrainingPass(object):
                     self._scope,
                     self._place)
                 ins = {'X': in_node}
-                outs = {'Out': out_node, 'OutScale': scale_node}
+                outs = {'OutScale': scale_node}
                 if not self._is_test:
                     state_in_node = graph.create_persistable_node(
                         name=unique_name.generate('scale_state@'),
@@ -1509,7 +1508,6 @@ class OutScaleForTrainingPass(object):
                     inputs=ins,
                     outputs=outs)
                 graph.link_to(in_node, scale_op_node)
-                graph.link_to(scale_op_node, out_node)
                 graph.link_to(scale_op_node, scale_node)
                 if not self._is_test:
                     graph.link_to(state_in_node, scale_op_node)
......
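With Out gone, the pass no longer creates a throwaway out_node just to satisfy the op's former output interface; the scale op now hangs off the monitored variable as a pure side branch, roughly:

    producer_op --> in_node --> (original consumers, unchanged)
                       \
                        +--> moving_average_abs_max_scale --> scale_node (OutScale)

In training mode the dispensable state/accum buffers are linked in the same way, as the surrounding code shows.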
@@ -146,7 +146,6 @@ class TestMovingAverageAbsMaxScaleOp(OpTest):
         out_state[0] = self.attrs['moving_rate'] * state[0] + 1
         out_scale = out_accum / out_state
         self.outputs = {
-            'Out': self.inputs['X'],
             'OutAccum': out_accum,
             'OutState': out_state,
             'OutScale': out_scale,
......
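A worked instance of the test's expected-output arithmetic, with illustrative values (moving_rate, state, accum, and x are hypothetical; the accum update mirrors the state update, as assumed above):

    import numpy as np

    moving_rate, state, accum = 0.9, 1.0, 1.0
    x = np.array([0.1, -0.5, 0.3])

    out_state = moving_rate * state + 1                # 1.9
    out_accum = moving_rate * accum + np.abs(x).max()  # 0.9 + 0.5 = 1.4
    out_scale = out_accum / out_state                  # ~0.7368

The expected-outputs dictionary now lists only OutAccum, OutState, and OutScale; checking 'Out': self.inputs['X'] would fail because the op no longer produces that output.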