/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <memory>
#include <string>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/algorithm.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {

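// Trait that selects the storage type of the learning rate: float16
// parameters keep their learning rate in float so the scalar step size does
// not lose precision; all other parameter types use the type itself.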
namespace details {
template <typename T>
struct LearningRateType {
  using Type = T;
};

template <>
struct LearningRateType<platform::float16> {
  using Type = float;
};
}  // namespace details

template <typename T>
using DataType = typename details::LearningRateType<T>::Type;

using framework::Tensor;
using framework::SelectedRows;
struct NoNesterov;
struct UseNesterov;

class MomentumOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override;
};

class MomentumOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("Param"),
                   "Input(Param) of Momentum should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Grad"),
                   "Input(Grad) of Momentum should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Velocity"),
                   "Input(Velocity) of Momentum should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
                   "Input(LearningRate) of Momentum should not be null.");
    PADDLE_ENFORCE(
        ctx->GetInputsVarType("Param").front() ==
            framework::proto::VarType::LOD_TENSOR,
        "The input var %s's type should be LoDTensor, but the received is %s",
        ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());

    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
                   "Output(ParamOut) of Momentum should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("VelocityOut"),
                   "Output(VelocityOut) of Momentum should not be null.");

    auto lr_dims = ctx->GetInputDim("LearningRate");
    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
                      "The input variable LearningRate may not have been "
                      "initialized. Please confirm that "
                      "exe.run(startup_program) is executed after the "
                      "optimizer.minimize call.");
    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
                      "Input(LearningRate) should be a scalar.");

    auto param_dim = ctx->GetInputDim("Param");
    if (ctx->GetInputsVarType("Grad")[0] ==
        framework::proto::VarType::LOD_TENSOR) {
      PADDLE_ENFORCE_EQ(
          param_dim, ctx->GetInputDim("Grad"),
          "Param and Grad input of MomentumOp should have the same dimension.");
      PADDLE_ENFORCE_EQ(
          param_dim, ctx->GetInputDim("Velocity"),
          "Param and Velocity of MomentumOp should have the same dimension.");
    }

    ctx->SetOutputDim("ParamOut", param_dim);
    ctx->SetOutputDim("VelocityOut", param_dim);
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
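    // The kernel type is determined by Param's data type, so float16
    // parameters pick the half-precision kernel even though the learning
    // rate itself may be stored as float (see details::LearningRateType).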
    auto input_data_type =
        OperatorWithKernel::IndicateVarDataType(ctx, "Param");
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

template <typename T>
class CPUDenseMomentumFunctor {
 private:
  const Tensor* param;
  const Tensor* grad;
  const Tensor* velocity;
  const Tensor* learning_rate;
  const T mu;
  const bool use_nesterov;
  Tensor* param_out;
  Tensor* velocity_out;

 public:
  CPUDenseMomentumFunctor(const Tensor* param, const Tensor* grad,
                          const Tensor* velocity, const Tensor* learning_rate,
                          const T mu, const bool use_nesterov,
                          Tensor* param_out, Tensor* velocity_out)
      : param(param),
        grad(grad),
        velocity(velocity),
        learning_rate(learning_rate),
        mu(mu),
        use_nesterov(use_nesterov),
        param_out(param_out),
        velocity_out(velocity_out) {}

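  // Classic momentum update, evaluated as Eigen expressions on CPU:
  //   v_out = mu * v + g
  //   p_out = p - (g + mu * v_out) * lr   (Nesterov)
  //   p_out = p - lr * v_out              (plain momentum)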
  inline void operator()() {
    auto p_out = framework::EigenVector<T>::Flatten(*param_out);
    auto v_out = framework::EigenVector<T>::Flatten(*velocity_out);

    auto p = framework::EigenVector<T>::Flatten(*param);
    auto v = framework::EigenVector<T>::Flatten(*velocity);
    auto g = framework::EigenVector<T>::Flatten(*grad);
    const auto* lr = learning_rate->data<DataType<T>>();

    v_out = v * mu + g;
    if (use_nesterov) {
      p_out = p - (g + v_out * mu) * static_cast<T>(lr[0]);
    } else {
      p_out = p - static_cast<T>(lr[0]) * v_out;
    }
  }
};

template <typename T, typename UpdateMethod>
class DenseMomentumFunctor;

// NOTE(dzh): for performance.
// To avoid an if/else branch inside the GPU kernel, the UseNesterov and
// NoNesterov updates are implemented as two separate functors.
template <typename T>
class DenseMomentumFunctor<T, UseNesterov> {
 private:
  const T* p_;
  const T* g_;
  const T* v_;
  const DataType<T>* lr_;
  const T mu_;
  const int64_t num_;
  T* p_out_;
  T* v_out_;

 public:
  DenseMomentumFunctor(const T* p, const T* g, const T* v,
                       const DataType<T>* learning_rate, const T mu,
                       const int64_t num, T* p_out, T* v_out)
      : p_(p),
        g_(g),
        v_(v),
        lr_(learning_rate),
        mu_(mu),
        num_(num),
        p_out_(p_out),
        v_out_(v_out) {}
  inline HOSTDEVICE void operator()(size_t i) const {
    // load operands from memory into registers
    const T p = p_[i];
    const T g = g_[i];
    const auto lr = lr_[0];
    const T v = v_[i];
    T v_out = v * mu_ + g;
    T p_out = p - (g + v_out * mu_) * static_cast<T>(lr);
    // write registers back to memory
    v_out_[i] = v_out;
    p_out_[i] = p_out;
  }
};

template <typename T>
class DenseMomentumFunctor<T, NoNesterov> {
 private:
  const T* p_;
  const T* g_;
  const T* v_;
  const DataType<T>* lr_;
  const T mu_;
  const int64_t num_;
  T* p_out_;
  T* v_out_;

 public:
  DenseMomentumFunctor(const T* p, const T* g, const T* v,
                       const DataType<T>* learning_rate, const T mu,
                       const int64_t num, T* p_out, T* v_out)
      : p_(p),
        g_(g),
        v_(v),
        lr_(learning_rate),
        mu_(mu),
        num_(num),
        p_out_(p_out),
        v_out_(v_out) {}
  inline HOSTDEVICE void operator()(size_t i) const {
    // load operands from memory into registers
    const T p = p_[i];
    const T g = g_[i];
    const T lr = static_cast<T>(lr_[0]);
    const T v = v_[i];
    T v_out = v * mu_ + g;
    T p_out = p - lr * v_out;
    // write registers back to memory
    v_out_[i] = v_out;
    p_out_[i] = p_out;
  }
};

template <typename T, typename UpdateMethod>
class SparseMomentumFunctor;

template <typename T>
class SparseMomentumFunctor<T, UseNesterov> {
 private:
  const T* p_;
  const T* g_;
  const T* v_;
  const DataType<T>* lr_;
  const T mu_;
  const int64_t* rows_;
  const int64_t row_numel_;
  const int64_t row_height_;
  T* p_out_;
  T* v_out_;

 public:
  SparseMomentumFunctor(const T* p, const T* g, const T* v,
                        const DataType<T>* lr, const T mu, const int64_t* rows,
                        int64_t row_numel, int64_t row_height, T* p_out,
                        T* v_out)
      : p_(p),
        g_(g),
        v_(v),
        lr_(lr),
        mu_(mu),
        rows_(rows),
        row_numel_(row_numel),
        row_height_(row_height),
        p_out_(p_out),
        v_out_(v_out) {}

  inline HOSTDEVICE void operator()(size_t i) {
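    // i indexes the dense parameter; find the matching row in the sorted
    // row list of the merged SelectedRows gradient. Rows absent from the
    // gradient contribute g = 0 below.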
    auto row_idx =
        math::BinarySearch<int64_t>(rows_, row_height_, i / row_numel_);
    T g = row_idx >= 0 ? g_[row_idx * row_numel_ + i % row_numel_]
                       : static_cast<T>(0);
    // load operands from memory into registers
    const T p = p_[i];
    const auto lr = lr_[0];
    const T v = v_[i];
    T v_out = v * mu_ + g;
    T p_out = p - (g + v_out * mu_) * static_cast<T>(lr);
    // write registers back to memory
    v_out_[i] = v_out;
    p_out_[i] = p_out;
  }
};

template <typename T>
class SparseMomentumFunctor<T, NoNesterov> {
 private:
  const T* p_;
  const T* g_;
  const T* v_;
  const DataType<T>* lr_;
  const T mu_;
  const int64_t* rows_;
  const int64_t row_numel_;
  const int64_t row_height_;
  T* p_out_;
  T* v_out_;

 public:
  SparseMomentumFunctor(const T* p, const T* g, const T* v,
                        const DataType<T>* lr, const T mu, const int64_t* rows,
                        int64_t row_numel, int64_t row_height, T* p_out,
                        T* v_out)
      : p_(p),
        g_(g),
        v_(v),
        lr_(lr),
        mu_(mu),
        rows_(rows),
        row_numel_(row_numel),
        row_height_(row_height),
        p_out_(p_out),
        v_out_(v_out) {}

  inline HOSTDEVICE void operator()(size_t i) {
    auto row_idx =
        math::BinarySearch<int64_t>(rows_, row_height_, i / row_numel_);
    T g = row_idx >= 0 ? g_[row_idx * row_numel_ + i % row_numel_]
                       : static_cast<T>(0);
    // load operands from memory into registers
    const T p = p_[i];
    const T lr = static_cast<T>(lr_[0]);
    const T v = v_[i];
    T v_out = v * mu_ + g;
    T p_out = p - v_out * lr;
    // write registers back to memory
    v_out_[i] = v_out;
    p_out_[i] = p_out;
  }
};

template <typename DeviceContext, typename T>
class MomentumOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    T mu = static_cast<T>(ctx.Attr<float>("mu"));
    bool use_nesterov = ctx.Attr<bool>("use_nesterov");

    auto learning_rate = ctx.Input<framework::Tensor>("LearningRate");
    auto param = ctx.Input<framework::Tensor>("Param");
    auto param_out = ctx.Output<framework::Tensor>("ParamOut");
    auto* velocity = ctx.Input<framework::Tensor>("Velocity");
    auto velocity_out = ctx.Output<framework::Tensor>("VelocityOut");
    param_out->mutable_data<T>(ctx.GetPlace());
    velocity_out->mutable_data<T>(ctx.GetPlace());

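    // The gradient is either a dense LoDTensor or a sparse SelectedRows;
    // dispatch to the matching update path.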
    auto* grad_var = ctx.InputVar("Grad");
    if (grad_var->IsType<framework::LoDTensor>()) {
      auto grad = ctx.Input<framework::Tensor>("Grad");
      if (platform::is_cpu_place(ctx.GetPlace())) {
        CPUDenseMomentumFunctor<T> functor(param, grad, velocity, learning_rate,
                                           mu, use_nesterov, param_out,
                                           velocity_out);
        functor();
      } else if (platform::is_gpu_place(ctx.GetPlace())) {
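        // On GPU, ForRange applies the element-wise functor to every
        // parameter element in a single kernel launch.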
        platform::ForRange<DeviceContext> for_range(
            static_cast<const DeviceContext&>(ctx.device_context()),
            param->numel());
        if (use_nesterov) {
          DenseMomentumFunctor<T, UseNesterov> functor(
              param->data<T>(), grad->data<T>(), velocity->data<T>(),
              learning_rate->data<DataType<T>>(), mu, param->numel(),
              param_out->mutable_data<T>(ctx.GetPlace()),
              velocity_out->mutable_data<T>(ctx.GetPlace()));
          for_range(functor);

        } else {
          DenseMomentumFunctor<T, NoNesterov> functor(
              param->data<T>(), grad->data<T>(), velocity->data<T>(),
              learning_rate->data<DataType<T>>(), mu, param->numel(),
              param_out->mutable_data<T>(ctx.GetPlace()),
              velocity_out->mutable_data<T>(ctx.GetPlace()));
          for_range(functor);
        }
      }

    } else if (grad_var->IsType<framework::SelectedRows>()) {
      // sparse update, e.g. for embedding tables, with a SelectedRows gradient
      auto grad = ctx.Input<framework::SelectedRows>("Grad");

      // the sparse gradient may be empty.
      if (grad->rows().size() == 0) {
        VLOG(3) << "Grad SelectedRows contains no data!";
        return;
      }

      framework::SelectedRows tmp_merged_grad;
      framework::SelectedRows* merged_grad = &tmp_merged_grad;
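      // MergeAdd sums duplicate row indices of the SelectedRows gradient so
      // each row appears at most once before the update is applied.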
      math::scatter::MergeAdd<DeviceContext, T> merge_func;
      merge_func(ctx.template device_context<DeviceContext>(), *grad,
                 merged_grad);

      const int64_t* rows = merged_grad->rows().Data(ctx.GetPlace());
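      // number of elements per sparse row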
      int64_t row_numel =
          merged_grad->value().numel() / merged_grad->rows().size();
      platform::ForRange<DeviceContext> for_range(
          static_cast<const DeviceContext&>(ctx.device_context()),
          param->numel());
      if (use_nesterov) {
        SparseMomentumFunctor<T, UseNesterov> functor(
            param->data<T>(), merged_grad->value().data<T>(),
            velocity->data<T>(), learning_rate->data<DataType<T>>(), mu, rows,
            row_numel, static_cast<int64_t>(merged_grad->rows().size()),
            param_out->mutable_data<T>(ctx.GetPlace()),
            velocity_out->mutable_data<T>(ctx.GetPlace()));
        for_range(functor);

      } else {
        SparseMomentumFunctor<T, NoNesterov> functor(
            param->data<T>(), merged_grad->value().data<T>(),
            velocity->data<T>(), learning_rate->data<DataType<T>>(), mu, rows,
            row_numel, static_cast<int64_t>(merged_grad->rows().size()),
            param_out->mutable_data<T>(ctx.GetPlace()),
            velocity_out->mutable_data<T>(ctx.GetPlace()));
        for_range(functor);
      }
    } else {
      PADDLE_THROW(
          string::Sprintf("MomentumOp only supports LoDTensor or SelectedRows "
                          "gradient, but the received Variable Type is %s",
                          framework::ToTypeName(grad_var->Type())));
    }
  }
};

}  // namespace operators
}  // namespace paddle
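
// A minimal registration sketch for reference; the conventional companion
// momentum_op.cc registers the kernels roughly as follows (illustrative,
// not part of this header):
//
//   namespace ops = paddle::operators;
//   REGISTER_OP_CPU_KERNEL(
//       momentum,
//       ops::MomentumOpKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::MomentumOpKernel<paddle::platform::CPUDeviceContext, double>);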