From 34d68f24fc5890341a47a124aaa7ed76fc5c12c1 Mon Sep 17 00:00:00 2001
From: wwhu
Date: Fri, 3 Nov 2017 15:24:34 +0800
Subject: [PATCH] fix doc and code style

---
 paddle/operators/clip_by_norm_op.cc                | 33 ++++-----------------
 paddle/operators/clip_by_norm_op.cu                |  5 ++-
 paddle/operators/clip_by_norm_op.h                 |  3 --
 .../framework/tests/test_clip_by_norm_op.py        |  8 ++---
 4 files changed, 12 insertions(+), 37 deletions(-)

diff --git a/paddle/operators/clip_by_norm_op.cc b/paddle/operators/clip_by_norm_op.cc
index 440542d331..b0ca53b525 100644
--- a/paddle/operators/clip_by_norm_op.cc
+++ b/paddle/operators/clip_by_norm_op.cc
@@ -39,15 +39,14 @@ template <typename AttrType>
 class ClipByNormOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   ClipByNormOpMaker(framework::OpProto* proto,
-                     framework::OpAttrChecker* op_checker)
+                    framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X",
-             "(Tensor)The input of clip_by_norm op."
+             "(Tensor) The input of clip_by_norm op."
              "The number of dimensions must be between [1, 9].");
     AddOutput("Out",
-              "(Tensor)The output of clip_by_norm op with shape as input(X)");
-    AddAttr<AttrType>(
-        "max_norm", "(float)The maximum norm value.");
+              "(Tensor) The output of clip_by_norm op with shape as input(X)");
+    AddAttr<AttrType>("max_norm", "(float)The maximum norm value.");
     AddComment(R"DOC(
 ClipByNorm operator limits the L2 norm of the input 'X' within 'max_norm'.
 If the L2 norm of 'X' is less than or equal to 'max_norm', 'Out' will be
@@ -62,29 +61,11 @@ where norm('X') represents the L2 norm of 'X'.
   }
 };
 
-class ClipByNormOpGrad : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
-    auto x_dims = ctx->GetInputDim("X");
-    if (ctx->HasOutput(framework::GradVarName("X"))) {
-      ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
-    }
-  }
-};
-
 }  // namespace operators
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(clip_by_norm,
-                             ops::ClipByNormOp,
+REGISTER_OP_WITHOUT_GRADIENT(clip_by_norm, ops::ClipByNormOp,
                              ops::ClipByNormOpMaker<float>);
-REGISTER_OP_CPU_KERNEL(clip_by_norm,
-                       ops::ClipByNormKernel<paddle::platform::CPUPlace, float>
-                       );
+REGISTER_OP_CPU_KERNEL(
+    clip_by_norm, ops::ClipByNormKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/clip_by_norm_op.cu b/paddle/operators/clip_by_norm_op.cu
index 5f363b999f..2593a24ebb 100644
--- a/paddle/operators/clip_by_norm_op.cu
+++ b/paddle/operators/clip_by_norm_op.cu
@@ -15,6 +15,5 @@
 #include "paddle/operators/clip_by_norm_op.h"
 
 namespace ops = paddle::operators;
-REGISTER_OP_GPU_KERNEL(clip_by_norm,
-                       ops::ClipByNormKernel<paddle::platform::GPUPlace, float>
-                       );
+REGISTER_OP_GPU_KERNEL(
+    clip_by_norm, ops::ClipByNormKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/clip_by_norm_op.h b/paddle/operators/clip_by_norm_op.h
index 6f5f8c20bf..b26476cae9 100644
--- a/paddle/operators/clip_by_norm_op.h
+++ b/paddle/operators/clip_by_norm_op.h
@@ -25,9 +25,6 @@ using Tensor = framework::Tensor;
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenScalar = framework::EigenScalar<T, MajorType, IndexType>;
 
 template <typename Place, typename T>
 class ClipByNormKernel : public framework::OpKernel<T> {
diff --git a/python/paddle/v2/framework/tests/test_clip_by_norm_op.py b/python/paddle/v2/framework/tests/test_clip_by_norm_op.py
index bf4f1a794c..02f6108a3a 100644
--- a/python/paddle/v2/framework/tests/test_clip_by_norm_op.py
+++ b/python/paddle/v2/framework/tests/test_clip_by_norm_op.py
@@ -18,21 +18,19 @@ class TestClipByNormOp(OpTest):
             output = self.max_norm * input / norm
         else:
             output = input
-        self.outputs = {
-            'Out': output
-        }
+        self.outputs = {'Out': output}
 
     def test_check_output(self):
         self.check_output()
 
     def initTestCase(self):
-        self.shape = (100,)
+        self.shape = (100, )
         self.max_norm = 1.0
 
 
 class TestCase1(TestClipByNormOp):
     def initTestCase(self):
-        self.shape = (100,)
+        self.shape = (100, )
         self.max_norm = 1e20
 
 
-- 
GitLab
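
For reference while reviewing: a minimal NumPy sketch of the clip_by_norm
semantics that the DOC comment and the unit test above both encode. This is a
sketch only; the helper name clip_by_norm_ref is illustrative and is not an
API introduced by this patch.

    import numpy as np

    def clip_by_norm_ref(x, max_norm):
        # L2 norm over all elements of X, per the operator DOC:
        # Out = max_norm * X / norm(X) when norm(X) > max_norm, else Out = X.
        norm = np.sqrt(np.sum(np.square(x)))
        if norm > max_norm:
            return max_norm * x / norm
        return x

    # Mirrors TestClipByNormOp: with shape (100, ) and max_norm = 1.0, any
    # input whose L2 norm exceeds 1.0 is rescaled to norm 1.0, while
    # TestCase1's max_norm = 1e20 leaves the input unchanged.
    x = np.random.random((100, )).astype('float32')
    out = clip_by_norm_ref(x, max_norm=1.0)
    assert np.sqrt(np.sum(np.square(out))) <= 1.0 + 1e-5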