/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/clip_by_norm_op.h"
namespace paddle {
namespace operators {

// Shape-inference definition for the clip_by_norm operator.
// clip_by_norm elementwise-scales X so its L2 norm does not exceed
// max_norm, so Out always has exactly the same shape (and LoD) as X.
class ClipByNormOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  // Validates the presence of Input(X)/Output(Out) and the positivity of
  // the max_norm attribute, then propagates X's dims and LoD to Out.
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of ClipByNormOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of ClipByNormOp should not be null.");
    // A non-positive norm bound is meaningless for clipping; fail early
    // at graph-construction time rather than at kernel launch.
    auto max_norm = ctx->Attrs().Get<float>("max_norm");
    PADDLE_ENFORCE_GT(max_norm, 0, "max_norm should be greater than 0.");
    auto x_dims = ctx->GetInputDim("X");
    ctx->SetOutputDim("Out", x_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};

// Proto maker for clip_by_norm: declares the operator's inputs, outputs,
// attributes, and user-facing documentation.
class ClipByNormOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(Tensor) The input of clip_by_norm op."
             "The number of dimensions must be between [1, 9].");
    AddOutput("Out",
              "(Tensor) The output of clip_by_norm op with shape as input(X)");
    // Required attribute (no default): the upper bound on the L2 norm.
    AddAttr<float>("max_norm", "(float) The maximum norm value.");
    AddComment(R"DOC(
ClipByNorm Operator.

This operator limits the L2 norm of the input $X$ within $max\_norm$.
If the L2 norm of $X$ is less than or equal to $max\_norm$, $Out$ will be
the same as $X$. If the L2 norm of $X$ is greater than $max\_norm$, $X$ will
be linearly scaled to make the L2 norm of $Out$ equal to $max\_norm$, as
shown in the following formula:

$$
Out = \\frac{max\\_norm * X}{norm(X)},
$$

where $norm(X)$ represents the L2 norm of $X$.

Examples:
        .. code-block:: python

            data = fluid.layer.data(
                name='data', shape=[2, 4, 6], dtype='float32')
            reshaped = fluid.layers.clip_by_norm(
                x=data, max_norm=0.5)

)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
W
wwhu 已提交
78
REGISTER_OP_WITHOUT_GRADIENT(clip_by_norm, ops::ClipByNormOp,
W
fix CI  
wwhu 已提交
79
                             ops::ClipByNormOpMaker);
W
wwhu 已提交
80
REGISTER_OP_CPU_KERNEL(
Q
QI JUN 已提交
81 82
    clip_by_norm,
    ops::ClipByNormKernel<paddle::platform::CPUDeviceContext, float>);