sgd_optimizer.cc
#include "sgd_optimizer.h"

namespace paddle {
namespace optimizer {

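// Binds the parameter tensor to this optimizer and, when momentum is
// enabled, allocates a momentum buffer of matching size.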
void SGDOptimizer::set_weight(Tensor *p) {
  //  ParameterOptimizer::set_weight(p);
  parameter_ = p;
  size_t size = p->size();
  // TODO: replace with an alignment-aware allocator bound to Tensor
  if (momentum_ != 0.0) {
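    // Momentum history lives in a separate Tensor of the same size as the
    // parameter (note: new real[size] does not zero-initialize the buffer).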
    real *ptr = new real[size];
    momentums_ = new Tensor(ptr, size);
  }
}

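// Applies one SGD update per element: the step is -lr * (grad + decay * param),
// optionally accumulated into a momentum buffer, with a Nesterov look-ahead
// (param += momentum * velocity - lr * grad) when nesterov_ is set.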
void SGDOptimizer::Update(const Tensor *gradient) {
  num_sample_passed_ += 1;
  double learning_rate = lr_policy_->LearningRate(num_sample_passed_);
  real velocity = 0.0;
  Tensor &param = *parameter_;
  const Tensor &grad = *gradient;
  Tensor &m = *momentums_;
  for (size_t i = 0; i < param.size(); ++i) {
    if (momentum_ == 0.0) {
      velocity = -learning_rate * grad[i] - learning_rate * decay_ * param[i];
    } else {
      m[i] = momentum_ * m[i] - learning_rate * grad[i] -
             learning_rate * decay_ * param[i];
      velocity = m[i];
    }
    if (nesterov_) {
      param[i] += momentum_ * velocity - learning_rate * grad[i];
    } else {
      param[i] += velocity;
    }
  }
}

}  // namespace optimizer
}  // namespace paddle
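
For reference, below is a minimal, self-contained sketch of the same per-element update rule using plain std::vector buffers instead of Paddle's Tensor and optimizer state; the function and parameter names are illustrative only and not part of the Paddle API.

#include <cstddef>
#include <cstdio>
#include <vector>

// Illustrative standalone re-statement of the rule in SGDOptimizer::Update;
// all names here are hypothetical.
void sgd_momentum_step(std::vector<float> &param,
                       const std::vector<float> &grad,
                       std::vector<float> &momentum_buf,
                       float lr, float momentum, float decay, bool nesterov) {
  for (std::size_t i = 0; i < param.size(); ++i) {
    float velocity;
    if (momentum == 0.0f) {
      // Plain SGD step with weight decay folded into the gradient.
      velocity = -lr * grad[i] - lr * decay * param[i];
    } else {
      // Classical momentum: accumulate a decaying history of past steps.
      momentum_buf[i] =
          momentum * momentum_buf[i] - lr * grad[i] - lr * decay * param[i];
      velocity = momentum_buf[i];
    }
    if (nesterov) {
      // Nesterov look-ahead: re-apply the gradient at the shifted point.
      param[i] += momentum * velocity - lr * grad[i];
    } else {
      param[i] += velocity;
    }
  }
}

int main() {
  std::vector<float> w = {1.0f, 2.0f};
  std::vector<float> g = {0.1f, -0.2f};
  std::vector<float> m(w.size(), 0.0f);
  sgd_momentum_step(w, g, m, /*lr=*/0.01f, /*momentum=*/0.9f,
                    /*decay=*/0.0f, /*nesterov=*/false);
  std::printf("updated weights: %f %f\n", w[0], w[1]);
  return 0;
}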