Unverified commit 42189be6 authored by cc, committed by GitHub

[Quant] Remove the output for moving_average_abs_max_scale op (#25697)

* Remove the output for moving_average_abs_max_scale op, test=develop
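
Context: the op's `Out` output was a verbatim copy of its input `X` (the op maker's doc string below calls it "just equivalent to the input tensor"), so dropping it removes a redundant `TensorCopy` and a dead edge in the quantization graph; consumers keep reading `X` directly. For reference, a NumPy sketch of the statistic the op does maintain, mirroring the reference computation in the unit test at the end of this diff (the `out_accum` update is an assumption following the same pattern as the visible `out_state` line):

```python
import numpy as np

def moving_average_abs_max_scale(x, accum, state, moving_rate=0.9):
    # Bias-corrected moving average of the per-batch absolute maximum.
    abs_max = np.abs(x).max()
    out_accum = moving_rate * accum + abs_max  # decayed sum of abs-max values
    out_state = moving_rate * state + 1.0      # decayed sample count
    out_scale = out_accum / out_state          # the quantization scale
    return out_accum, out_state, out_scale
```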
Parent 521e7015
@@ -447,8 +447,6 @@ class MovingAverageAbsMaxScaleOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
                    "MovingAverageAbsMaxScale");
-    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
-                   "MovingAverageAbsMaxScale");
     OP_INOUT_CHECK(ctx->HasOutput("OutScale"), "Output", "OutScale",
                    "MovingAverageAbsMaxScale");
     if (ctx->HasOutput("OutState")) {
@@ -457,9 +455,7 @@ class MovingAverageAbsMaxScaleOp : public framework::OperatorWithKernel {
     if (ctx->HasOutput("OutAccum")) {
       ctx->SetOutputDim("OutAccum", {1});
     }
-    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->SetOutputDim("OutScale", {1});
-    ctx->ShareLoD("X", /*->*/ "Out");
   }
 
  protected:
@@ -477,8 +473,6 @@ class MovingAverageAbsMaxScaleOpMaker
     AddInput("X", "(Tensor) Input is float data type.");
     AddInput("InAccum", "Last accum.").AsDispensable();
     AddInput("InState", "Last state.").AsDispensable();
-    AddOutput("Out",
-              "(Tensor) Output tensor is just equivalent to the input tensor.");
     AddOutput("OutScale", " Current scale");
     AddOutput("OutState", "(Tensor) state buffer.").AsDispensable();
     AddOutput("OutAccum", "(Tensor) accum buffer.").AsDispensable();
......
@@ -277,10 +277,7 @@ class MovingAverageAbsMaxScaleKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in = context.Input<framework::Tensor>("X");
-    auto* out = context.Output<framework::Tensor>("Out");
-    out->mutable_data<T>(context.GetPlace());
     auto& dev_ctx = context.template device_context<DeviceContext>();
-    framework::TensorCopy(*in, context.GetPlace(), dev_ctx, out);
 
     bool is_test = context.Attr<bool>("is_test");
     // testing
......
@@ -1449,7 +1449,6 @@ class OutScaleForTrainingPass(object):
         for op in target_ops:
             for output_var_name in _get_op_output_var_names(op):
                 in_node = graph._find_node_by_name(op.outputs, output_var_name)
-                out_node = graph.create_var_node_from_desc(in_node.var())
                 scale_node = graph.create_persistable_node(
                     name=self._scale_name(in_node.name()),
                     var_type=core.VarDesc.VarType.LOD_TENSOR,
@@ -1464,7 +1463,7 @@ class OutScaleForTrainingPass(object):
                     self._scope,
                     self._place)
                 ins = {'X': in_node}
-                outs = {'Out': out_node, 'OutScale': scale_node}
+                outs = {'OutScale': scale_node}
                 if not self._is_test:
                     state_in_node = graph.create_persistable_node(
                         name=unique_name.generate('scale_state@'),
@@ -1509,7 +1508,6 @@ class OutScaleForTrainingPass(object):
                     inputs=ins,
                     outputs=outs)
                 graph.link_to(in_node, scale_op_node)
-                graph.link_to(scale_op_node, out_node)
                 graph.link_to(scale_op_node, scale_node)
                 if not self._is_test:
                     graph.link_to(state_in_node, scale_op_node)
......
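
Taken together, the three pass hunks above reduce the wiring to the following shape (a condensed sketch, not the verbatim pass code; the `create_op_node` call and its attrs are assumed from the elided surrounding lines, and the training-only `InState`/`InAccum` and `OutState`/`OutAccum` plumbing is omitted):

```python
# Condensed sketch of OutScaleForTrainingPass wiring after this change.
ins = {'X': in_node}
outs = {'OutScale': scale_node}  # no 'Out' entry, no out_node variable
scale_op_node = graph.create_op_node(
    op_type='moving_average_abs_max_scale',
    attrs={'moving_rate': self._moving_rate, 'is_test': self._is_test},
    inputs=ins,
    outputs=outs)
graph.link_to(in_node, scale_op_node)
graph.link_to(scale_op_node, scale_node)  # the op -> out_node edge is gone
```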
@@ -146,7 +146,6 @@ class TestMovingAverageAbsMaxScaleOp(OpTest):
         out_state[0] = self.attrs['moving_rate'] * state[0] + 1
         out_scale = out_accum / out_state
         self.outputs = {
-            'Out': self.inputs['X'],
             'OutAccum': out_accum,
             'OutState': out_state,
             'OutScale': out_scale,
......
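
With the `'Out'` entry gone, the test's reference dict matches the op's declared outputs one-to-one, so verification stays the standard OpTest flow (a sketch; `check_output()` is the usual OpTest entry point, assumed present in the elided part of the test):

```python
# After building self.inputs / self.attrs / self.outputs as above:
self.check_output()  # compares op results against the NumPy reference
```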