/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

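// Implements one step of the Adam update (Kingma & Ba, 2015):
//   moment1_out = beta1 * moment1 + (1 - beta1) * grad
//   moment2_out = beta2 * moment2 + (1 - beta2) * grad^2
//   lr_t        = lr * sqrt(1 - beta2_pow) / (1 - beta1_pow)
//   param_out   = param - lr_t * moment1_out / (sqrt(moment2_out) + epsilon)
// where beta1_pow and beta2_pow hold beta1^t and beta2^t for step t.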
template <typename DeviceContext, typename T>
class AdamOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
    auto moment1_out_tensor = ctx.Output<framework::Tensor>("Moment1Out");
    auto moment2_out_tensor = ctx.Output<framework::Tensor>("Moment2Out");

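    // Allocate the output buffers on the kernel's place (CPU or GPU)
    // before the Eigen expressions below write into them.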
    param_out_tensor->mutable_data<T>(ctx.GetPlace());
    moment1_out_tensor->mutable_data<T>(ctx.GetPlace());
    moment2_out_tensor->mutable_data<T>(ctx.GetPlace());

    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));

    auto param = framework::EigenVector<T>::Flatten(
        *ctx.Input<framework::Tensor>("Param"));
    auto grad = framework::EigenVector<T>::Flatten(
        *ctx.Input<framework::Tensor>("Grad"));
    auto moment1 = framework::EigenVector<T>::Flatten(
        *ctx.Input<framework::Tensor>("Moment1"));
    auto moment2 = framework::EigenVector<T>::Flatten(
        *ctx.Input<framework::Tensor>("Moment2"));
    auto lr = framework::EigenVector<T>::Flatten(
        *ctx.Input<framework::Tensor>("LearningRate"));
    auto beta1_pow = framework::EigenVector<T>::Flatten(
        *ctx.Input<framework::Tensor>("Beta1Pow"));
    auto beta2_pow = framework::EigenVector<T>::Flatten(
        *ctx.Input<framework::Tensor>("Beta2Pow"));
    auto param_out = framework::EigenVector<T>::Flatten(*param_out_tensor);
    auto moment1_out = framework::EigenVector<T>::Flatten(*moment1_out_tensor);
    auto moment2_out = framework::EigenVector<T>::Flatten(*moment2_out_tensor);
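    // Evaluate all of the Eigen expressions below on this kernel's device.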
    auto* place = ctx.template device_context<DeviceContext>().eigen_device();

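    // Update the biased first- and second-moment estimates of the gradient.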
    moment1_out.device(*place) = beta1 * moment1 + (1 - beta1) * grad;
    moment2_out.device(*place) = beta2 * moment2 + (1 - beta2) * grad.square();

    // lr, beta1_pow, and beta2_pow are all one-element tensors, so lr_t is too
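    // Fold the bias corrections into the learning rate:
    //   lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)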
    auto lr_t = lr * (1 - beta2_pow).sqrt() / (1 - beta1_pow);
    // Eigen does not support automatic broadcasting, so explicitly
    // broadcast lr_t to the shape of the moment vectors
    Eigen::DSizes<int, 1> m_dsize(moment1_out_tensor->numel());
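    // Apply the bias-corrected update to the parameters.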
    param_out.device(*place) =
        param -
        lr_t.broadcast(m_dsize) *
            (moment1_out / (moment2_out.sqrt() + epsilon));
  }
};
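
// A kernel like this is typically registered per device in the matching
// .cc/.cu files; a minimal sketch (op name and element types assumed):
//   REGISTER_OP_CPU_KERNEL(
//       adam, ops::AdamOpKernel<paddle::platform::CPUDeviceContext, float>);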

}  // namespace operators
}  // namespace paddle