/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <math.h>  // for sqrt in CPU and CUDA
#include <Eigen/Dense>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/threadpool.h"
#include "paddle/fluid/operators/math/algorithm.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/for_range.h"

// Declares FLAGS_inner_op_parallelism (defined elsewhere in the framework),
// used by the multi-threaded sparse path in AdamOpKernel below.
DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace operators {

namespace scatter = paddle::operators::math::scatter;

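// Reads a scalar float hyper-parameter (e.g. Beta1Tensor / Beta2Tensor)
// from a tensor, copying it to the CPU first when it lives on the GPU.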
static inline float GetAttrFromTensor(const framework::Tensor* tensor) {
  const float* tensor_data = tensor->data<float>();
  framework::Tensor cpu_tensor;
  if (platform::is_gpu_place(tensor->place())) {
    TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor);
    tensor_data = cpu_tensor.data<float>();
  }
  return tensor_data[0];
}

class AdamOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const framework::Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

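// Tag types that select a functor flavour below: GPUAdam updates one
// element per call (one GPU thread each), while CPUAdam updates the whole
// array at once with vectorized Eigen expressions.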
struct GPUAdam;
struct CPUAdam;

template <typename T, typename Flavour>
class AdamFunctor;

template <typename T>
class AdamFunctor<T, GPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

 public:
  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

  inline HOSTDEVICE void operator()(size_t i) const {
    // Merge all memory accesses together.
    T g = grad_[i];
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
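    // Adam update (Kingma & Ba, 2015) with the bias corrections folded into
    // the learning rate:
    //   lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
    //   m_t  = beta1 * m + (1 - beta1) * g
    //   v_t  = beta2 * v + (1 - beta2) * g^2
    //   p   -= lr_t * m_t / (sqrt(v_t) + epsilon * sqrt(1 - beta2^t))
    // Scaling epsilon by sqrt(1 - beta2^t) keeps this algebraically equal
    // to the textbook form p -= lr * m_hat / (sqrt(v_hat) + epsilon).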
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }
};

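// CPU flavour of the dense update: the whole parameter is processed in a
// single call using Eigen array expressions, which vectorize across
// elements.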
template <typename T>
class AdamFunctor<T, CPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

 public:
  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

  void operator()(size_t numel) const {
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{
        grad_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{
        moment1_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{
        moment2_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{
        param_, static_cast<Eigen::Index>(numel)};

    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{
        param_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{
        moment1_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{
        moment2_out_, static_cast<Eigen::Index>(numel)};

    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    moment1_out = beta1_ * mom1 + (1 - beta1_) * g;
    moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g;
    param_out = param -
                lr * (moment1_out /
                      (moment2_out.sqrt() + epsilon_ * sqrt(1 - beta2_pow)));
  }
};

template <typename T, typename Flavour>
class SparseAdamFunctor;

template <typename T>
class SparseAdamFunctor<T, GPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  const int64_t* rows_;
  int64_t row_numel_;
  int64_t row_count_;
  bool lazy_mode_;

 public:
  SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                    const T* beta2_pow, const T* mom1, T* mom1_out,
                    const T* mom2, T* mom2_out, const T* lr, const T* grad,
                    const T* param, T* param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count, bool lazy_mode)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out),
        rows_(rows),
        row_numel_(row_numel),
        row_count_(row_count),
        lazy_mode_(lazy_mode) {}

  inline HOSTDEVICE void adam_update(size_t i, T g) const {
    // The following code is the same as the dense update
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }

  inline HOSTDEVICE void operator()(size_t i) const {
    auto row_idx =
        math::BinarySearch<int64_t>(rows_, row_count_, i / row_numel_);
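    // row_idx is the position of this parameter row in the sparse
    // gradient's row list, or -1 when the row received no gradient. In
    // lazy mode such rows are skipped entirely; otherwise they get the
    // g = 0 update, so their moments still decay.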
    if (lazy_mode_ && row_idx < 0) {
      return;
    } else {
      T g = row_idx >= 0 ? grad_[row_idx * row_numel_ + i % row_numel_] : 0;
      adam_update(i, g);
    }
  }
};

template <typename T>
class SparseAdamFunctor<T, CPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  const int64_t* rows_;
  int64_t row_numel_;
  int64_t row_count_;

 public:
  SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                    const T* beta2_pow, const T* mom1, T* mom1_out,
                    const T* mom2, T* mom2_out, const T* lr, const T* grad,
                    const T* param, T* param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count, bool lazy_mode)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out),
        rows_(rows),
        row_numel_(row_numel),
        row_count_(row_count) {}
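  // Note: lazy_mode is accepted only for signature parity with the GPUAdam
  // flavour and is not stored; the kernel drives CPU lazy mode itself by
  // calling adam_update directly for the rows that have gradients.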

  inline HOSTDEVICE void adam_update(size_t i, T g) const {
    // The following code is the same as the dense update
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }

  inline void operator()(size_t numel) const {
    // lr is loaded once and reused for all rows
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);
    int64_t row_count = static_cast<int64_t>(numel / row_numel_);

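    // Walk all parameter rows (i) and the sorted gradient rows (j) in
    // lock-step. Rows present in the gradient get the full Adam update;
    // for absent rows the else branch applies the g = 0 form, where the
    // moments simply decay by beta1 / beta2.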
    for (int64_t i = 0, j = 0; i != row_count; ++i) {
      if (i == *(rows_ + j)) {
        for (int64_t k = 0; k != row_numel_; ++k) {
          T g = grad_[j * row_numel_ + k];
          adam_update(i * row_numel_ + k, g);
        }
        ++j;
      } else {
        for (int64_t k = 0; k != row_numel_; ++k) {
          T mom1 = moment1_[i * row_numel_ + k];
          T mom2 = moment2_[i * row_numel_ + k];
          T p = param_[i * row_numel_ + k];

          mom1 = beta1_ * mom1;
          mom2 = beta2_ * mom2;

          p -= lr * (mom1 / (sqrt(mom2) + epsilon_));
          // Write back to global memory
          moment1_out_[i * row_numel_ + k] = mom1;
          moment2_out_[i * row_numel_ + k] = mom2;
          param_out_[i * row_numel_ + k] = p;
        }
      }
    }
  }
};

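// CPU kernel. Dispatches on the gradient variable's type: a dense LoDTensor
// gradient takes the vectorized AdamFunctor<T, CPUAdam> path, while a
// SelectedRows gradient takes the SparseAdamFunctor<T, CPUAdam> path with
// optional lazy mode and multi-threading.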
template <typename DeviceContext, typename T>
class AdamOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const auto* param_var = ctx.InputVar("Param");
    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
                      platform::errors::InvalidArgument(
                          "The Var(%s)'s type should be LoDTensor, "
                          "but the received is %s",
                          ctx.InputNames("Param").front(),
                          framework::ToTypeName(param_var->Type())));

    using paddle::framework::LoDTensor;

    int64_t min_row_size_to_use_multithread =
        ctx.Attr<int64_t>("min_row_size_to_use_multithread");
    bool lazy_mode = ctx.Attr<bool>("lazy_mode");
    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
    auto* param = ctx.Input<LoDTensor>("Param");
    auto* grad_var = ctx.InputVar("Grad");
    auto* mom1 = ctx.Input<LoDTensor>("Moment1");
    auto* mom2 = ctx.Input<LoDTensor>("Moment2");
    auto* lr = ctx.Input<LoDTensor>("LearningRate");

    auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
    auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");

    auto* param_out = ctx.Output<LoDTensor>("ParamOut");
    auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
    auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
    auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
    auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");

    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    if (ctx.HasInput("Beta1Tensor")) {
      auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
      PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta1Tensor) size must be 1, but get %d",
                            beta1_tensor->numel()));
      beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor));
    }
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    if (ctx.HasInput("Beta2Tensor")) {
      auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
      PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta2Tensor) size must be 1, but get %d",
                            beta2_tensor->numel()));
      beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor));
    }
    VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
            << "beta2_pow.numel() : " << beta2_pow->numel();
    VLOG(3) << "param.numel(): " << param->numel();

    PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta1 pow output size should be 1, but received "
                          "value is:%d.",
                          beta1_pow_out->numel()));

    PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta2 pow output size should be 1, but received "
                          "value is:%d.",
                          beta2_pow_out->numel()));

    if (grad_var->IsType<framework::LoDTensor>()) {
      auto* grad = ctx.Input<LoDTensor>("Grad");

      AdamFunctor<T, CPUAdam> functor(
          beta1, beta2, epsilon, beta1_pow->data<T>(), beta2_pow->data<T>(),
          mom1->data<T>(), mom1_out->mutable_data<T>(ctx.GetPlace()),
          mom2->data<T>(), mom2_out->mutable_data<T>(ctx.GetPlace()),
          lr->data<T>(), grad->data<T>(), param->data<T>(),
          param_out->mutable_data<T>(ctx.GetPlace()));
      functor(param->numel());
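      // Advance beta1^t and beta2^t for the next step.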
      beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
          beta1 * beta1_pow->data<T>()[0];
      beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
          beta2 * beta2_pow->data<T>()[0];

    } else if (grad_var->IsType<framework::SelectedRows>()) {
      auto* grad = ctx.Input<framework::SelectedRows>("Grad");
      if (grad->rows().size() == 0) {
        VLOG(3) << "grad row size is 0!!";
        return;
      }

      std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end());
      bool is_strict_sorted = true;
      for (size_t i = 1; i < cpu_rows.size(); ++i) {
        if (cpu_rows[i - 1] >= cpu_rows[i]) {
          is_strict_sorted = false;
          break;
        }
      }

      framework::SelectedRows tmp_grad_merge;
      const framework::SelectedRows* grad_merge_ptr;
      if (is_strict_sorted) {
        grad_merge_ptr = grad;
      } else {
        // merge duplicated rows if any.
        // The rows of grad_merge have been sorted inside MergeAdd functor
        scatter::MergeAdd<DeviceContext, T> merge_func;
        merge_func(ctx.template device_context<DeviceContext>(), *grad,
                   &tmp_grad_merge, true);
        grad_merge_ptr = &tmp_grad_merge;
      }

      auto& grad_merge = *grad_merge_ptr;
      auto& grad_tensor = grad_merge.value();
      const T* grad_data = grad_tensor.template data<T>();
      const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace());
      auto row_numel = grad_tensor.numel() / grad_merge.rows().size();

      SparseAdamFunctor<T, CPUAdam> functor(
          beta1, beta2, epsilon, beta1_pow->data<T>(), beta2_pow->data<T>(),
          mom1->data<T>(), mom1_out->mutable_data<T>(ctx.GetPlace()),
          mom2->data<T>(), mom2_out->mutable_data<T>(ctx.GetPlace()),
          lr->data<T>(), grad_data, param->data<T>(),
          param_out->mutable_data<T>(ctx.GetPlace()), rows, row_numel,
          grad_merge.rows().size(), lazy_mode);
      // update beta1 and beta2
      beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
          beta1 * beta1_pow->data<T>()[0];
      beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
          beta2 * beta2_pow->data<T>()[0];
      if (lazy_mode) {
        VLOG(3) << "run cpu lazy mode";
        size_t row_count = grad_merge.rows().size();
        std::vector<int64_t> cpu_rows(grad_merge.rows());
        for (size_t row_index = 0; row_index < row_count; ++row_index) {
          for (size_t offset = 0; offset < row_numel; ++offset) {
            size_t i = cpu_rows[row_index] * row_numel + offset;
            functor.adam_update(i, grad_data[row_index * row_numel + offset]);
          }
        }
      }
#ifndef _WIN32
      else if (FLAGS_inner_op_parallelism > 1 &&  // NOLINT
               min_row_size_to_use_multithread > 0 &&
               param->dims()[0] > min_row_size_to_use_multithread) {
        VLOG(3) << "use multi thread, inner_op_parallelism="
                << FLAGS_inner_op_parallelism
                << " min_row_size_to_use_multithread="
                << min_row_size_to_use_multithread;
        if (FLAGS_inner_op_parallelism > 10) {
          VLOG(1) << "FLAGS_inner_op_parallelism " << FLAGS_inner_op_parallelism
                  << " is two large!";
        }
        auto& grad_rows = grad_merge.rows();
        std::unordered_map<size_t, int> row_id_to_grad_row_offset;
        size_t param_row_count = param->numel() / row_numel;
        if (param_row_count < 1000) {
          VLOG(1) << "param_row_count should be larger then 1000 to use "
                     "multi thread, currently "
                  << param_row_count;
        }
        for (size_t i = 0; i < grad_rows.size(); ++i) {
          row_id_to_grad_row_offset[grad_rows[i]] = i;
        }
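        // Shard the parameter rows into contiguous [start, end) chunks, one
        // per thread; rows without a gradient entry get the g = 0 update.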
        std::vector<std::future<void>> fs;
        int64_t line_in_each_thread =
            param_row_count / FLAGS_inner_op_parallelism + 1;
        for (int i = 0; i < FLAGS_inner_op_parallelism; ++i) {
          int64_t start = i * line_in_each_thread;
          int64_t end = (i + 1) * line_in_each_thread;
          if (start >= static_cast<int64_t>(param_row_count)) {
            break;
          }
          if (end > static_cast<int64_t>(param_row_count)) {
            end = static_cast<int64_t>(param_row_count);
          }
          fs.push_back(framework::Async([&functor, &row_id_to_grad_row_offset,
                                         &grad_data, row_numel, start, end]() {
            for (int64_t row_id = start; row_id < end; ++row_id) {
              auto iter = row_id_to_grad_row_offset.find(row_id);
              if (iter != row_id_to_grad_row_offset.end()) {
                for (size_t row_offset = 0U; row_offset < row_numel;
                     ++row_offset) {
                  functor.adam_update(
                      row_id * row_numel + row_offset,
                      grad_data[iter->second * row_numel + row_offset]);
                }
              } else {
                for (size_t row_offset = 0U; row_offset < row_numel;
                     ++row_offset) {
                  functor.adam_update(row_id * row_numel + row_offset, 0);
                }
              }
            }
          }));
        }
        for (size_t i = 0; i < fs.size(); ++i) fs[i].wait();
      }
#endif        // !_WIN32
      else {  // NOLINT
        functor(param->numel());
      }
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Variable type not supported by adam_op"));
    }
  }
};

}  // namespace operators
}  // namespace paddle