From 3dab2e20eae3a0637991eb694111859c155064c0 Mon Sep 17 00:00:00 2001
From: Zhong Hui
Date: Wed, 8 Sep 2021 10:37:08 +0800
Subject: [PATCH] Add op define extra for norm and frobenius norm op. (#35329)

---
 paddle/fluid/operators/norm_op.cc             | 18 +++++++++----
 paddle/fluid/operators/norm_op.cu             | 25 ++++++++++++-----
 paddle/fluid/operators/norm_op.h              | 19 ++++++++++---
 paddle/fluid/operators/reduce_ops/reduce_op.h |  3 ++-
 .../fluid/tests/unittests/test_norm_op.py     | 27 +++++++++++++++++++
 5 files changed, 77 insertions(+), 15 deletions(-)

diff --git a/paddle/fluid/operators/norm_op.cc b/paddle/fluid/operators/norm_op.cc
index 5880141520..f6dbe10023 100644
--- a/paddle/fluid/operators/norm_op.cc
+++ b/paddle/fluid/operators/norm_op.cc
@@ -35,7 +35,12 @@ class NormOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Norm",
               "(Tensor) A tensor saved the `sqrt(sum(x) + epsion)` will "
               "be used in backward kernel.")
-        .AsIntermediate();
+        .AsIntermediate()
+        .AsExtra();
+    AddAttr<bool>("is_test",
+                  "(bool, default false) Set to true for inference only, false "
+                  "for training.")
+        .SetDefault(false);
     AddOutput("Out", "(Tensor) A tensor of the same shape as X.");
     AddComment(R"DOC(
 
@@ -59,10 +64,13 @@ class NormOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "NormOp");
     auto xdim = ctx->GetInputDim("X");
     ctx->SetOutputDim("Out", xdim);
-    int axis = ctx->Attrs().Get<int>("axis");
-    if (axis < 0) axis = xdim.size() + axis;
-    xdim[axis] = 1;
-    ctx->SetOutputDim("Norm", xdim);
+
+    if (ctx->Attrs().Get<bool>("is_test") == false) {
+      int axis = ctx->Attrs().Get<int>("axis");
+      if (axis < 0) axis = xdim.size() + axis;
+      xdim[axis] = 1;
+      ctx->SetOutputDim("Norm", xdim);
+    }
   }
 };
 
diff --git a/paddle/fluid/operators/norm_op.cu b/paddle/fluid/operators/norm_op.cu
index 4c1674ded1..e2a56cb742 100644
--- a/paddle/fluid/operators/norm_op.cu
+++ b/paddle/fluid/operators/norm_op.cu
@@ -65,16 +65,29 @@ class NormCUDAKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* in_x = ctx.Input<framework::Tensor>("X");
     auto* out_y = ctx.Output<framework::Tensor>("Out");
-    auto* out_norm = ctx.Output<framework::Tensor>("Norm");
-    const T* x = in_x->data<T>();
-    T* y = out_y->mutable_data<T>(ctx.GetPlace());
-    T* norm = out_norm->mutable_data<T>(ctx.GetPlace());
 
     auto xdim = in_x->dims();
-    auto ndim = out_norm->dims();
     int axis = ctx.Attr<int>("axis");
-    T eps = static_cast<T>(ctx.Attr<float>("epsilon"));
     if (axis < 0) axis = xdim.size() + axis;
+    T eps = static_cast<T>(ctx.Attr<float>("epsilon"));
+
+    bool is_test = ctx.Attr<bool>("is_test");
+
+    framework::Tensor* out_norm;
+    framework::Tensor out_norm_tmp;
+    if (is_test) {
+      auto out_dim = in_x->dims();
+      out_dim[axis] = 1;
+      out_norm = &out_norm_tmp;
+      out_norm->Resize(out_dim);
+    } else {
+      out_norm = ctx.Output<framework::Tensor>("Norm");
+    }
+
+    const T* x = in_x->data<T>();
+    T* y = out_y->mutable_data<T>(ctx.GetPlace());
+    T* norm = out_norm->mutable_data<T>(ctx.GetPlace());
+
     int pre, n, post;
     GetDims(xdim, axis, &pre, &n, &post);
 
diff --git a/paddle/fluid/operators/norm_op.h b/paddle/fluid/operators/norm_op.h
index f81cbc2c73..058c523625 100644
--- a/paddle/fluid/operators/norm_op.h
+++ b/paddle/fluid/operators/norm_op.h
@@ -38,9 +38,6 @@ class NormKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* in_x = ctx.Input<framework::Tensor>("X");
     auto* out_y = ctx.Output<framework::Tensor>("Out");
-    auto* out_norm = ctx.Output<framework::Tensor>("Norm");
-    out_y->mutable_data<T>(ctx.GetPlace());
-    out_norm->mutable_data<T>(ctx.GetPlace());
 
     auto xdim = in_x->dims();
     T eps = static_cast<T>(ctx.Attr<float>("epsilon"));
@@ -49,6 +46,22 @@ class NormKernel : public framework::OpKernel<T> {
     int pre, n, post;
     GetDims(xdim, axis, &pre, &n, &post);
 
+    bool is_test = ctx.Attr<bool>("is_test");
+
+    framework::Tensor* out_norm;
+    framework::Tensor out_norm_tmp;
+    if (is_test) {
+      auto out_dim = in_x->dims();
+      out_dim[axis] = 1;
+      out_norm = &out_norm_tmp;
+      out_norm->Resize(out_dim);
+    } else {
+      out_norm = ctx.Output<framework::Tensor>("Norm");
+    }
+
+    out_y->mutable_data<T>(ctx.GetPlace());
+    out_norm->mutable_data<T>(ctx.GetPlace());
+
     auto* place =
         ctx.template device_context<DeviceContext>().eigen_device();
     Eigen::DSizes<int, 3> shape(pre, n, post);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_op.h b/paddle/fluid/operators/reduce_ops/reduce_op.h
index af01b71adb..6ed4475d1c 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_op.h
+++ b/paddle/fluid/operators/reduce_ops/reduce_op.h
@@ -645,7 +645,8 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker {
         .SetDefault(-1);
     AddAttr<bool>("use_mkldnn",
                   "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false);
+        .SetDefault(false)
+        .AsExtra();
     AddComment(string::Sprintf(R"DOC(
 %s Operator.
 
diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py
index 8a3632530c..13e5773e98 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_op.py
@@ -89,6 +89,33 @@ class TestNormOp5(TestNormOp):
         pass
 
 
+@skip_check_grad_ci(reason="skip check grad for test mode.")
+class TestNormTestOp(OpTest):
+    def setUp(self):
+        self.op_type = "norm"
+        self.init_test_case()
+        x = np.random.random(self.shape).astype("float64")
+        y, norm = l2_norm(x, self.axis, self.epsilon)
+        self.inputs = {'X': x}
+        self.attrs = {
+            'epsilon': self.epsilon,
+            'axis': self.axis,
+            'is_test': True
+        }
+        self.outputs = {'Out': y}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        pass
+
+    def init_test_case(self):
+        self.shape = [2, 3, 4, 5]
+        self.axis = 1
+        self.epsilon = 1e-8
+
+
 class API_NormTest(unittest.TestCase):
     def test_errors(self):
         with fluid.program_guard(fluid.Program()):
-- 
GitLab
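
Reviewer note: the new TestNormTestOp drives the is_test=True path through the
existing l2_norm NumPy reference in test_norm_op.py. Below is a minimal sketch
of the semantics that test relies on, assuming (per the op's doc string) that
Norm = sqrt(sum(x^2, axis) + epsilon) and Out = X / Norm; the standalone helper
is illustrative only and not part of this patch.

    import numpy as np

    def l2_norm_reference(x, axis, epsilon):
        # Illustrative stand-in for the l2_norm helper in test_norm_op.py.
        # Norm = sqrt(sum(x^2) + epsilon) along `axis`, with the reduced
        # axis kept as size 1; the op saves it only for the backward pass.
        norm = np.sqrt(np.sum(np.square(x), axis=axis, keepdims=True) + epsilon)
        # Out = x / Norm (broadcast over `axis`) is all inference needs.
        return x / norm, norm

    x = np.random.random([2, 3, 4, 5]).astype("float64")
    y, norm = l2_norm_reference(x, axis=1, epsilon=1e-8)
    print(y.shape, norm.shape)  # (2, 3, 4, 5) (2, 1, 4, 5)

With is_test=True the kernels above still compute the norm internally, but
write it into the local out_norm_tmp rather than the Norm output. That is why
InferShape now sets the Norm dim only when is_test is false, and why the new
test checks Out alone and skips the gradient check.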