/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

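// Standard headers used only by the illustrative AdamaxUpdateSketch defined
// later in this file (an expository addition, not part of the original
// registration logic).
#include <algorithm>
#include <cmath>
#include <cstddef>
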
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/multiary.h"

namespace paddle {
namespace operators {

class AdamaxOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

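  // The kernel runs on ctx.GetPlace() with the data type taken from the
  // "Param" input.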
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "Param"), ctx.GetPlace());
  }
};

class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Param", "(Tensor) Input parameter");
    AddInput("Grad", "(Tensor) Input gradient");
    AddInput("LearningRate", "(Tensor) Learning rate");
    AddInput("Moment", "(Tensor) First moment");
    AddInput("InfNorm",
             "(Tensor) "
             "Input exponentially weighted infinity norm");
    AddInput("Beta1Pow", "(Tensor) Input beta1 power accumulator");

    AddOutput("ParamOut", "(Tensor) Output parameter");
    AddOutput("MomentOut", "(Tensor) Output first moment");
    AddOutput("InfNormOut",
              "(Tensor) "
              "Output exponentially weighted infinity norm");

    AddAttr<float>("beta1",
                   "(float, default 0.9) "
                   "Exponential decay rate for the "
                   "1st moment estimates.")
        .SetDefault(0.9f);
    AddAttr<float>("beta2",
                   "(float, default 0.999) "
                   "exponential decay rate for the weighted "
                   "infinity norm estimates.")
        .SetDefault(0.999f);
    AddAttr<float>("epsilon",
                   "(float, default 1.0e-8) "
                   "Constant for numerical stability")
        .SetDefault(1.0e-8f);
    AddComment(R"DOC(
Adamax Optimizer.

We implement the Adamax optimizer from Section 7 of the Adam
paper: https://arxiv.org/abs/1412.6980. Adamax is a variant of the
Adam algorithm based on the infinity norm.

Adamax updates:

$$
moment\_out = \beta_1 * moment + (1 - \beta_1) * grad \\
inf\_norm\_out = max(\beta_2 * inf\_norm + \epsilon, |grad|) \\
learning\_rate = \frac{learning\_rate}{1 - \beta_{1\_pow}} \\
param\_out = param - learning\_rate * \frac{moment\_out}{inf\_norm\_out}
$$
Here $\beta_{1\_pow}$ is $\beta_1^t$ accumulated in the Beta1Pow input over
$t$ steps; dividing by $1 - \beta_{1\_pow}$ applies the first-moment bias
correction from the Adam paper.

The original paper does not have an epsilon attribute. However, it is added
here for numerical stability to prevent division by zero.

)DOC");
  }
};
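
// Reference only: a minimal sketch of the elementwise update that the Adamax
// kernel performs, matching the equations in AdamaxOpMaker's documentation
// above. The registered kernel lives in phi; this function's name, signature,
// and flat float-buffer layout are illustrative assumptions, not Paddle's API.
[[maybe_unused]] static void AdamaxUpdateSketch(float *param,
                                                float *moment,
                                                float *inf_norm,
                                                const float *grad,
                                                size_t n,
                                                float lr,
                                                float beta1,
                                                float beta2,
                                                float beta1_pow,
                                                float epsilon) {
  // Bias-corrected learning rate: lr / (1 - beta1^t).
  const float lr_t = lr / (1.0f - beta1_pow);
  for (size_t i = 0; i < n; ++i) {
    // First moment: exponential moving average of the gradient.
    moment[i] = beta1 * moment[i] + (1.0f - beta1) * grad[i];
    // Exponentially weighted infinity norm; epsilon keeps the denominator
    // away from zero.
    inf_norm[i] = std::max(beta2 * inf_norm[i] + epsilon, std::fabs(grad[i]));
    // Parameter step along moment / inf_norm.
    param[i] -= lr_t * moment[i] / inf_norm[i];
  }
}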

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
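
// Shape and dtype inference for adamax is delegated to phi::AdamaxInferMeta
// through the functor declared below. Adamax has no gradient op, so empty
// gradient makers are registered for both static-graph (OpDesc) and
// imperative (OpBase) modes.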
DECLARE_INFER_SHAPE_FUNCTOR(adamax,
                            AdamaxInferMetaFunctor,
                            PD_INFER_META(phi::AdamaxInferMeta));

REGISTER_OPERATOR(
    adamax,
    ops::AdamaxOp,
    ops::AdamaxOpMaker,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
    AdamaxInferMetaFunctor);