/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <math.h>  // for sqrt in CPU and CUDA
#include <Eigen/Dense>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {

namespace scatter = paddle::operators::math::scatter;

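// Tag types for selecting the dense Adam implementation: the GPUAdam flavour
// updates one element per invocation (driven by platform::ForRange on the
// device), while the CPUAdam flavour updates the whole tensor in a single
// call using Eigen.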
struct GPUAdam;
struct CPUAdam;

template <typename T, typename Flavour>
struct AdamFunctor;

template <typename T>
struct AdamFunctor<T, GPUAdam> {
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

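  // Per-element Adam update for index i:
  //   lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
  //   m_t  = beta1 * m_{t-1} + (1 - beta1) * g
  //   v_t  = beta2 * v_{t-1} + (1 - beta2) * g * g
  //   p_t  = p_{t-1} - lr_t * m_t / (sqrt(v_t) + epsilon)
  // beta1_pow_ and beta2_pow_ hold beta1^t and beta2^t for the current step.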
  inline HOSTDEVICE void operator()(size_t i) const {
    // Load all inputs for element i up front so the global memory accesses
    // are grouped together.
    T g = grad_[i];
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }
};

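// The CPUAdam flavour applies the same update as above to the whole tensor at
// once: the raw pointers are wrapped in Eigen::Map arrays so the moment and
// parameter updates become vectorized array expressions instead of a
// per-element loop.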
template <typename T>
struct AdamFunctor<T, CPUAdam> {
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

  void operator()(size_t numel) const {
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{
        grad_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{
        moment1_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{
        moment2_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{
        param_, static_cast<Eigen::Index>(numel)};

    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{
        param_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{
        moment1_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{
        moment2_out_, static_cast<Eigen::Index>(numel)};

    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    moment1_out = beta1_ * mom1 + (1 - beta1_) * g;
    moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g;
    param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_));
  }
};

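// Adam update for a SelectedRows (sparse) gradient. The functor is invoked
// once per gradient row: rows_[i] maps the i-th compact gradient row to the
// corresponding parameter row, and row_numel_ is the number of elements per
// row. Only rows that actually received a gradient are updated.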
template <typename T>
struct SparseAdamFunctor {
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  const int64_t* rows_;
  int64_t row_numel_;

  SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                    const T* beta2_pow, const T* mom1, T* mom1_out,
                    const T* mom2, T* mom2_out, const T* lr, const T* grad,
                    const T* param, T* param_out, const int64_t* rows,
                    int64_t row_numel)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out),
        rows_(rows),
        row_numel_(row_numel) {}

  inline HOSTDEVICE void operator()(size_t i) const {
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    for (int64_t j = 0; j < row_numel_; ++j) {
      T g = grad_[i * row_numel_ + j];
      T mom1 = moment1_[rows_[i] * row_numel_ + j];
      T mom2 = moment2_[rows_[i] * row_numel_ + j];
      T lr = *lr_;
      T p = param_[rows_[i] * row_numel_ + j];

      lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

      mom1 = beta1_ * mom1 + (1 - beta1_) * g;
      mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
      p -= lr * (mom1 / (sqrt(mom2) + epsilon_));

      moment1_out_[rows_[i] * row_numel_ + j] = mom1;
      moment2_out_[rows_[i] * row_numel_ + j] = mom2;
      param_out_[rows_[i] * row_numel_ + j] = p;
    }  // for col id
  }
};

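// Adam op kernel. Dispatches on the type of the "Grad" input: a dense
// LoDTensor gradient uses AdamFunctor (Eigen on CPU, ForRange on GPU), while
// a SelectedRows gradient is first merged by row and then updated with
// SparseAdamFunctor.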
template <typename DeviceContext, typename T>
class AdamOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    using paddle::framework::LoDTensor;
    using paddle::operators::detail::Ref;

    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
    auto& param = Ref(ctx.Input<LoDTensor>("Param"), "Must set Param");
    // Grad may be a dense LoDTensor or a sparse SelectedRows, so fetch the
    // Variable first and dispatch on its actual type below.
    auto* grad_var = ctx.InputVar("Grad");
    auto& mom1 = Ref(ctx.Input<LoDTensor>("Moment1"), "Must set Moment1");
    auto& mom2 = Ref(ctx.Input<LoDTensor>("Moment2"), "Must set Moment2");
    auto& lr =
        Ref(ctx.Input<LoDTensor>("LearningRate"), "Must set LearningRate");

    auto& beta1_pow =
        Ref(ctx.Input<LoDTensor>("Beta1Pow"), "Must set Beta1Pow");
    auto& beta2_pow =
        Ref(ctx.Input<LoDTensor>("Beta2Pow"), "Must set Beta2Pow");

    auto& param_out =
        Ref(ctx.Output<LoDTensor>("ParamOut"), "Must set ParamOut");
    auto& mom1_out =
        Ref(ctx.Output<LoDTensor>("Moment1Out"), "Must set Moment1Out");
    auto& mom2_out =
        Ref(ctx.Output<LoDTensor>("Moment2Out"), "Must set Moment2Out");

    if (grad_var->IsType<framework::LoDTensor>()) {
      auto& grad = Ref(ctx.Input<LoDTensor>("Grad"), "Must set Grad");

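      // Dense update: on CPU the Eigen functor is applied to the whole tensor
      // in one call; on GPU the element-wise functor is launched over
      // param.numel() elements via ForRange.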
      if (platform::is_cpu_place(ctx.GetPlace())) {
        AdamFunctor<T, CPUAdam> functor(
            beta1, beta2, epsilon, beta1_pow.template data<T>(),
            beta2_pow.template data<T>(), mom1.template data<T>(),
            mom1_out.template mutable_data<T>(ctx.GetPlace()),
            mom2.template data<T>(),
            mom2_out.template mutable_data<T>(ctx.GetPlace()),
            lr.template data<T>(), grad.template data<T>(),
            param.template data<T>(),
            param_out.template mutable_data<T>(ctx.GetPlace()));
        functor(param.numel());
      } else if (platform::is_gpu_place(ctx.GetPlace())) {
        AdamFunctor<T, GPUAdam> functor(
            beta1, beta2, epsilon, beta1_pow.template data<T>(),
            beta2_pow.template data<T>(), mom1.template data<T>(),
            mom1_out.template mutable_data<T>(ctx.GetPlace()),
            mom2.template data<T>(),
            mom2_out.template mutable_data<T>(ctx.GetPlace()),
            lr.template data<T>(), grad.template data<T>(),
            param.template data<T>(),
            param_out.template mutable_data<T>(ctx.GetPlace()));

        platform::ForRange<DeviceContext> for_range(
            static_cast<const DeviceContext&>(ctx.device_context()),
            param.numel());
        for_range(functor);
      }
    } else if (grad_var->IsType<framework::SelectedRows>()) {
      auto& grad =
          Ref(ctx.Input<framework::SelectedRows>("Grad"), "Must set Grad");
      if (grad.rows().size() == 0) {
        VLOG(3) << "grad row size is 0, skip the Adam update";
        return;
      }
      // Merge duplicated rows, if any, so each parameter row is updated once.
      scatter::MergeAdd<DeviceContext, T> merge_func;
      auto grad_merge =
          merge_func(ctx.template device_context<DeviceContext>(), grad);
      auto& grad_tensor = grad_merge.value();
      const T* grad_data = grad_tensor.template data<T>();
      int64_t* rows = nullptr;
// CUDAMutableData() is only available when compiled with CUDA, so the GPU
// branch must be guarded by the preprocessor.
#if defined(PADDLE_WITH_CUDA)
      if (platform::is_gpu_place(ctx.GetPlace())) {
        rows = grad_merge.mutable_rows()->CUDAMutableData(ctx.GetPlace());
      } else {
#endif
        rows = grad_merge.mutable_rows()->data();

#if defined(PADDLE_WITH_CUDA)
      }
#endif
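      // Number of elements per row of the merged gradient, which equals the
      // row width of the parameter and moment tensors.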
      auto row_numel = grad_tensor.numel() / grad_merge.rows().size();

      SparseAdamFunctor<T> functor(
          beta1, beta2, epsilon, beta1_pow.template data<T>(),
          beta2_pow.template data<T>(), mom1.template data<T>(),
          mom1_out.template mutable_data<T>(ctx.GetPlace()),
          mom2.template data<T>(),
          mom2_out.template mutable_data<T>(ctx.GetPlace()),
          lr.template data<T>(), grad_data, param.template data<T>(),
          param_out.template mutable_data<T>(ctx.GetPlace()), rows, row_numel);
      platform::ForRange<DeviceContext> for_range(
          static_cast<const DeviceContext&>(ctx.device_context()),
          grad_merge.rows().size());
      for_range(functor);
    } else {
      PADDLE_THROW("Variable type not supported by adam_op");
    }
  }
};

}  // namespace operators
}  // namespace paddle