/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/optimizers/proximal_adagrad_op.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
class ProximalAdagradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
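  // Checks that all inputs and outputs are present, that Param, Grad, and
  // Moment share the same shape, and that LearningRate is a scalar, then
  // propagates Param's shape to both outputs.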
  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param",
                   "ProximalAdagradOp");
    OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment",
                   "ProximalAdagradOp");
    OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "ProximalAdagradOp");
    OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate",
                   "ProximalAdagradOp");

    OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut",
                   "ProximalAdagradOp");
    OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut",
                   "ProximalAdagradOp");

    auto param_dim = ctx->GetInputDim("Param");
    PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"),
                      platform::errors::InvalidArgument(
                          "The shape of Input(Param) should be equal to the "
                          "shape of Input(Grad) of ProximalAdagrad Op. But "
                          "received Input(Param).dimensions=[%s], "
                          "Input(Grad).dimensions=[%s]",
                          param_dim, ctx->GetInputDim("Grad")));

    PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Moment"),
                      platform::errors::InvalidArgument(
                          "The shape of Input(Param) should be equal to the "
                          "shape of Input(Moment) of ProximalAdagrad Op. But "
                          "received Input(Param).dimensions=[%s], "
                          "Input(Moment).dimensions=[%s]",
                          param_dim, ctx->GetInputDim("Moment")));

    auto lr_dim = ctx->GetInputDim("LearningRate");
    PADDLE_ENFORCE_EQ(
        framework::product(lr_dim), 1,
        platform::errors::InvalidArgument(
            "LearningRate should be a scalar. But received "
            "LearningRate.dimensions=[%s]",
            lr_dim));

    ctx->SetOutputDim("ParamOut", param_dim);
    ctx->SetOutputDim("MomentOut", param_dim);
  }
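
  // The kernel's data type follows the dtype of the "Param" input; the
  // remaining inputs are expected to share it.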
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "Param"), ctx.GetPlace());
  }
};

class ProximalAdagradOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Param",
             "(Tensor, default Tensor<float>) "
             "Input parameter that has to be updated.");
    AddInput("Moment",
             "(Tensor, default Tensor<float>) "
             "Moment parameter that has to be updated.");
    AddInput("Grad",
             "(Tensor, default Tensor<float>) "
             "Input gradient of the parameter.");
    AddInput("LearningRate",
             "(Tensor, default Tensor<float>) "
             "The learning rate should be a tensor of size 1.");

    AddOutput("ParamOut", "(Tensor) Output updated parameter value.");
    AddOutput("MomentOut", "(Tensor) Output updated moment value.");

    AddAttr<float>("l1",
                   "(float, default 0.0) "
                   "L1 regularization strength.")
        .SetDefault(0.0f);
    AddAttr<float>("l2",
                   "(float, default 0.0) "
                   "L2 regularization strength.")
        .SetDefault(0.0f);
    AddComment(R"DOC(
Proximal Adagrad Optimizer.

Optimizer that implements the proximal adagrad algorithm:

$$
moment = moment + grad * grad \\
prox\_param = param - learning\_rate * grad * (1 / \sqrt{moment}) \\
param = sign(prox\_param) / (1 + learning\_rate * l2) *
        \max(|prox\_param| - learning\_rate * l1 , 0)
$$

The paper that proposed Proximal GD:
(http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf)
Here, we use the Adagrad learning rate as specified here:
(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)

)DOC");
  }
};
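
// A minimal scalar sketch of one ProximalAdagrad update step, mirroring the
// formula in the DOC string above. This is illustrative only: the real
// tensor kernel lives in proximal_adagrad_op.h, and the variable names and
// values below are hypothetical.
//
//   float grad = 0.5f, moment = 0.0f, param = 1.0f;
//   float lr = 0.1f, l1 = 0.01f, l2 = 0.0f;
//   moment += grad * grad;                               // moment = 0.25
//   float prox = param - lr * grad / std::sqrt(moment);  // prox   = 0.9
//   float sign = (prox > 0.f) ? 1.f : -1.f;              // sign   = 1
//   param = sign / (1.f + lr * l2) *
//           std::max(std::fabs(prox) - lr * l1, 0.f);    // param  = 0.899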
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(proximal_adagrad, ops::ProximalAdagradOp,
                             ops::ProximalAdagradOpMaker);
REGISTER_OP_CPU_KERNEL(
    proximal_adagrad,
    ops::ProximalAdagradOpKernel<paddle::platform::CPUDeviceContext, float>);