/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <math.h>  // for sqrt in CPU and CUDA
#include <Eigen/Dense>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/threadpool.h"
#include "paddle/fluid/operators/math/algorithm.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {

namespace scatter = paddle::operators::math::scatter;

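// Reads a scalar attribute (e.g. beta1 / beta2) that is supplied as a
// tensor; if the tensor lives on the GPU it is first copied to the host.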
static inline float GetAttrFromTensor(const framework::Tensor* tensor) {
  const float* tensor_data = tensor->data<float>();
  framework::Tensor cpu_tensor;
  if (platform::is_gpu_place(tensor->place())) {
    TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor);
    tensor_data = cpu_tensor.data<float>();
  }
  return tensor_data[0];
}

class AdamOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const framework::Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

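// Tag types selecting an implementation "flavour" for the functors below:
// GPUAdam performs an element-wise HOSTDEVICE update (one call per element),
// while CPUAdam updates the whole tensor at once with Eigen expressions.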
struct GPUAdam;
struct CPUAdam;

template <typename T, typename Flavour>
class AdamFunctor;

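// Both specializations implement the standard Adam update:
//   lr_t   = lr * sqrt(1 - beta2_pow) / (1 - beta1_pow)
//   mom1   = beta1 * mom1 + (1 - beta1) * g
//   mom2   = beta2 * mom2 + (1 - beta2) * g * g
//   param -= lr_t * mom1 / (sqrt(mom2) + epsilon)
// where beta1_pow / beta2_pow hold beta1^t / beta2^t for the current step t.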
template <typename T>
class AdamFunctor<T, GPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

 public:
  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

  inline HOSTDEVICE void operator()(size_t i) const {
    // Merge all memory access together.
    T g = grad_[i];
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }
};
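
// Usage sketch (illustrative only, not part of this header): an element-wise
// functor such as AdamFunctor<T, GPUAdam> is typically driven by
// platform::ForRange, which invokes operator()(i) once per element, e.g.
//
//   platform::ForRange<DeviceContext> for_range(dev_ctx, param->numel());
//   for_range(functor);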

template <typename T>
class AdamFunctor<T, CPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

 public:
  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

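  // Vectorized whole-tensor update: the raw buffers are mapped into 1-D
  // Eigen arrays so the Adam step is written as array expressions instead
  // of an explicit scalar loop.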
  void operator()(size_t numel) const {
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{
        grad_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{
        moment1_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{
        moment2_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{
        param_, static_cast<Eigen::Index>(numel)};

    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{
        param_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{
        moment1_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{
        moment2_out_, static_cast<Eigen::Index>(numel)};

    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    moment1_out = beta1_ * mom1 + (1 - beta1_) * g;
    moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g;
    param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_));
  }
};

template <typename T, typename Flavour>
class SparseAdamFunctor;

template <typename T>
class SparseAdamFunctor<T, GPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  const int64_t* rows_;
  int64_t row_numel_;
  int64_t row_count_;
  bool lazy_mode_;

 public:
  SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                    const T* beta2_pow, const T* mom1, T* mom1_out,
                    const T* mom2, T* mom2_out, const T* lr, const T* grad,
                    const T* param, T* param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count, bool lazy_mode)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out),
        rows_(rows),
        row_numel_(row_numel),
        row_count_(row_count),
        lazy_mode_(lazy_mode) {}

  inline HOSTDEVICE void adam_update(size_t i, T g) const {
    // The following code is the same as dense
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }

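  // One invocation per element of the dense parameter. The owning row is
  // located in the sorted rows_ array via binary search; in lazy mode rows
  // without a sparse gradient are skipped, otherwise they are updated with
  // a zero gradient.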
  inline HOSTDEVICE void operator()(size_t i) const {
    auto row_idx =
        math::BinarySearch<int64_t>(rows_, row_count_, i / row_numel_);
    if (lazy_mode_ && row_idx < 0) {
      return;
    } else {
      T g = row_idx >= 0 ? grad_[row_idx * row_numel_ + i % row_numel_] : 0;
      adam_update(i, g);
    }
  }
};

template <typename T>
class SparseAdamFunctor<T, CPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  const int64_t* rows_;
  int64_t row_numel_;
  int64_t row_count_;

 public:
  SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                    const T* beta2_pow, const T* mom1, T* mom1_out,
                    const T* mom2, T* mom2_out, const T* lr, const T* grad,
                    const T* param, T* param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count, bool lazy_mode)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out),
        rows_(rows),
        row_numel_(row_numel),
        row_count_(row_count) {}

  inline HOSTDEVICE void adam_update(size_t i, T g) const {
    // The following code is the same as dense
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }

  inline void operator()(size_t numel) const {
    // lr could be reused
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);
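    // Walk over every row of the dense parameter; j tracks the next row of
    // the (sorted) sparse gradient. Rows with a gradient entry get the full
    // Adam update, rows without one are updated as if the gradient were zero.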
    int64_t row_count = static_cast<int64_t>(numel / row_numel_);

    for (int64_t i = 0, j = 0; i != row_count; ++i) {
      if (i == *(rows_ + j)) {
        for (int64_t k = 0; k != row_numel_; ++k) {
          T g = grad_[j * row_numel_ + k];
          adam_update(i * row_numel_ + k, g);
        }
        ++j;
      } else {
        for (int64_t k = 0; k != row_numel_; ++k) {
          T mom1 = moment1_[i * row_numel_ + k];
          T mom2 = moment2_[i * row_numel_ + k];
          T p = param_[i * row_numel_ + k];

          mom1 = beta1_ * mom1;
          mom2 = beta2_ * mom2;

          p -= lr * (mom1 / (sqrt(mom2) + epsilon_));
          // Write back to global memory
          moment1_out_[i * row_numel_ + k] = mom1;
          moment2_out_[i * row_numel_ + k] = mom2;
          param_out_[i * row_numel_ + k] = p;
        }
      }
    }
  }
};

template <typename DeviceContext, typename T>
class AdamOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const auto* param_var = ctx.InputVar("Param");
    PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
                   "The Var(%s)'s type should be LoDTensor, "
                   "but the received is %s",
                   ctx.InputNames("Param").front(),
                   framework::ToTypeName(param_var->Type()));

    using paddle::framework::LoDTensor;

    int64_t min_row_size_to_use_multithread =
        ctx.Attr<int64_t>("min_row_size_to_use_multithread");
    bool lazy_mode = ctx.Attr<bool>("lazy_mode");
    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
    auto* param = ctx.Input<LoDTensor>("Param");
    auto* grad_var = ctx.InputVar("Grad");
    auto* mom1 = ctx.Input<LoDTensor>("Moment1");
    auto* mom2 = ctx.Input<LoDTensor>("Moment2");
    auto* lr = ctx.Input<LoDTensor>("LearningRate");

    auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
    auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");

    auto* param_out = ctx.Output<LoDTensor>("ParamOut");
    auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
    auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
    auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
    auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");

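    // beta1 / beta2 can optionally be supplied at runtime as one-element
    // tensors (Beta1Tensor / Beta2Tensor); if present they override the
    // corresponding float attributes.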
    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    if (ctx.HasInput("Beta1Tensor")) {
      auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
      beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor));
    }
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    if (ctx.HasInput("Beta2Tensor")) {
      auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
      beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor));
    }
    VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
            << "beta2_pow.numel() : " << beta2_pow->numel();
    VLOG(3) << "param.numel(): " << param->numel();

    PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta1 pow output size should be 1, but received "
                          "value is:%d.",
                          beta1_pow_out->numel()));

    PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta2 pow output size should be 1, but received "
                          "value is:%d.",
                          beta2_pow_out->numel()));

    if (grad_var->IsType<framework::LoDTensor>()) {
      auto* grad = ctx.Input<LoDTensor>("Grad");

      AdamFunctor<T, CPUAdam> functor(
          beta1, beta2, epsilon, beta1_pow->data<T>(), beta2_pow->data<T>(),
          mom1->data<T>(), mom1_out->mutable_data<T>(ctx.GetPlace()),
          mom2->data<T>(), mom2_out->mutable_data<T>(ctx.GetPlace()),
          lr->data<T>(), grad->data<T>(), param->data<T>(),
          param_out->mutable_data<T>(ctx.GetPlace()));
      functor(param->numel());
      beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
          beta1 * beta1_pow->data<T>()[0];
      beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
          beta2 * beta2_pow->data<T>()[0];

    } else if (grad_var->IsType<framework::SelectedRows>()) {
      auto* grad = ctx.Input<framework::SelectedRows>("Grad");
      if (grad->rows().size() == 0) {
        VLOG(3) << "grad row size is 0!!";
        return;
      }

      std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end());
      bool is_strict_sorted = true;
      for (size_t i = 1; i < cpu_rows.size(); ++i) {
        if (cpu_rows[i - 1] >= cpu_rows[i]) {
          is_strict_sorted = false;
          break;
        }
      }

      framework::SelectedRows tmp_grad_merge;
      const framework::SelectedRows* grad_merge_ptr;
      if (is_strict_sorted) {
        grad_merge_ptr = grad;
      } else {
        // merge duplicated rows if any.
        // The rows of grad_merge have been sorted inside MergeAdd functor
        scatter::MergeAdd<DeviceContext, T> merge_func;
        merge_func(ctx.template device_context<DeviceContext>(), *grad,
                   &tmp_grad_merge, true);
        grad_merge_ptr = &tmp_grad_merge;
      }

      auto& grad_merge = *grad_merge_ptr;
      auto& grad_tensor = grad_merge.value();
      const T* grad_data = grad_tensor.template data<T>();
      const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace());
      auto row_numel = grad_tensor.numel() / grad_merge.rows().size();

      SparseAdamFunctor<T, CPUAdam> functor(
          beta1, beta2, epsilon, beta1_pow->data<T>(), beta2_pow->data<T>(),
          mom1->data<T>(), mom1_out->mutable_data<T>(ctx.GetPlace()),
          mom2->data<T>(), mom2_out->mutable_data<T>(ctx.GetPlace()),
          lr->data<T>(), grad_data, param->data<T>(),
          param_out->mutable_data<T>(ctx.GetPlace()), rows, row_numel,
          grad_merge.rows().size(), lazy_mode);
      // update beta1_pow and beta2_pow
      beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
          beta1 * beta1_pow->data<T>()[0];
      beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
          beta2 * beta2_pow->data<T>()[0];
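      // In lazy mode only the parameter rows that actually appear in the
      // sparse gradient are updated here; all other rows are left untouched.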
      if (lazy_mode) {
        VLOG(3) << "run cpu lazy mode";
        size_t row_count = grad_merge.rows().size();
        std::vector<int64_t> cpu_rows(grad_merge.rows());
        for (size_t row_index = 0; row_index < row_count; ++row_index) {
          for (size_t offset = 0; offset < row_numel; ++offset) {
            size_t i = cpu_rows[row_index] * row_numel + offset;
            functor.adam_update(i, grad_data[row_index * row_numel + offset]);
          }
        }
      }
#ifndef _WIN32
      else if (FLAGS_inner_op_parallelism > 1 &&  // NOLINT
               min_row_size_to_use_multithread > 0 &&
               param->dims()[0] > min_row_size_to_use_multithread) {
        VLOG(3) << "use multi thread, inner_op_parallelism="
                << FLAGS_inner_op_parallelism
                << " min_row_size_to_use_multithread="
                << min_row_size_to_use_multithread;
        if (FLAGS_inner_op_parallelism > 10) {
          VLOG(1) << "FLAGS_inner_op_parallelism " << FLAGS_inner_op_parallelism
                  << " is two large!";
        }
        auto& grad_rows = grad_merge.rows();
        std::unordered_map<size_t, int> row_id_to_grad_row_offset;
        size_t param_row_count = param->numel() / row_numel;
        if (param_row_count < 1000) {
          VLOG(1) << "param_row_count should be larger then 1000 to use "
                     "multi thread, currently "
                  << param_row_count;
        }
        for (size_t i = 0; i < grad_rows.size(); ++i) {
          row_id_to_grad_row_offset[grad_rows[i]] = i;
        }
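        // Partition the parameter rows into contiguous chunks, one per
        // worker thread; rows without a sparse gradient entry are updated
        // with a zero gradient inside the lambda below.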
        std::vector<std::future<void>> fs;
        int64_t line_in_each_thread =
            param_row_count / FLAGS_inner_op_parallelism + 1;
        for (int i = 0; i < FLAGS_inner_op_parallelism; ++i) {
          int64_t start = i * line_in_each_thread;
          int64_t end = (i + 1) * line_in_each_thread;
          if (start >= static_cast<int64_t>(param_row_count)) {
            break;
          }
          if (end > static_cast<int64_t>(param_row_count)) {
            end = static_cast<int64_t>(param_row_count);
          }
          fs.push_back(framework::Async([&functor, &row_id_to_grad_row_offset,
                                         &grad_data, row_numel, start, end]() {
            for (int64_t row_id = start; row_id < end; ++row_id) {
              auto iter = row_id_to_grad_row_offset.find(row_id);
              if (iter != row_id_to_grad_row_offset.end()) {
                for (size_t row_offset = 0U; row_offset < row_numel;
                     ++row_offset) {
                  functor.adam_update(
                      row_id * row_numel + row_offset,
                      grad_data[iter->second * row_numel + row_offset]);
                }
              } else {
                for (size_t row_offset = 0U; row_offset < row_numel;
                     ++row_offset) {
                  functor.adam_update(row_id * row_numel + row_offset, 0);
                }
              }
            }
          }));
        }
        for (size_t i = 0; i < fs.size(); ++i) fs[i].wait();
      }
#endif        // !_WIN32
      else {  // NOLINT
        functor(param->numel());
      }
    } else {
      PADDLE_THROW("Variable type not supported by adam_op");
    }
  }
};

}  // namespace operators
}  // namespace paddle