adam_optimizer.cc
#include "adam_optimizer.h"
#include <cmath>

namespace paddle {
namespace optimizer {

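// set_weight binds the optimizer to a parameter tensor and allocates the
// per-element Adam state (momentums_ and velocitys_). Note that the state
// is allocated with new and never freed here, so calling set_weight twice
// would leak the previous buffers.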
void AdamOptimizer::set_weight(Tensor *p) {
  parameter_ = p;  // cache the parameter tensor; update() dereferences it
  size_t size = p->size();
  // Allocate first-moment (m) and second-moment (v) buffers matching the
  // parameter size.
  real *mptr = new real[size];
  momentums_ = new Tensor(mptr, size);
  real *vptr = new real[size];
  velocitys_ = new Tensor(vptr, size);
}

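// Adam update rule (Kingma & Ba, "Adam: A Method for Stochastic
// Optimization", ICLR 2015), applied element-wise below with
// t = num_sample_passed and g_t = gradient:
//   m_t = beta_1 * m_{t-1} + (1 - beta_1) * g_t
//   v_t = beta_2 * v_{t-1} + (1 - beta_2) * g_t^2
//   theta_t = theta_{t-1} - alpha_t * m_t / sqrt(v_t + epsilon)
// where alpha_t carries the bias corrections computed below.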
void AdamOptimizer::update(const Tensor *gradient) {
  num_sample_passed += 1;
  double learning_rate = lr_policy->get_learning_rate(num_sample_passed);
  // Fold the bias corrections for both moment estimates into the step
  // size: alpha_t = alpha * sqrt(1 - beta_2^t) / (1 - beta_1^t), with
  // t = num_sample_passed.
  double coef1 = 1.0 - std::pow(beta_1, num_sample_passed);
  double coef2 = 1.0 - std::pow(beta_2, num_sample_passed);
  learning_rate *= std::sqrt(coef2) / coef1;
  Tensor &param = *parameter_;
  const Tensor &grad = *gradient;
  Tensor &m = *momentums_;
  Tensor &v = *velocitys_;
  for (size_t i = 0; i < param.size(); ++i) {
    // Exponential moving averages of the gradient and the squared gradient.
    m[i] = beta_1 * m[i] + (1.0 - beta_1) * grad[i];
    v[i] = beta_2 * v[i] + (1.0 - beta_2) * grad[i] * grad[i];
    // Bias-corrected Adam step; epsilon sits inside the square root to
    // guard against division by zero. The decay term applies weight decay
    // directly to the update rather than to the gradient.
    param[i] -=
        learning_rate * (m[i] / std::sqrt(v[i] + epsilon) + decay * param[i]);
  }
}
}  // namespace optimizer
}  // namespace paddle
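
// A minimal usage sketch (illustrative only; the constructor signature is an
// assumption based on the members used above, not taken from this file):
//
//   real buf[4] = {0};
//   Tensor *param = new Tensor(buf, 4);  // Tensor(real*, size_t), as above
//   AdamOptimizer opt(/* lr_policy, beta_1, beta_2, epsilon, decay */);
//   opt.set_weight(param);  // allocate m/v state and cache the parameter
//   opt.update(grad);       // one bias-corrected Adam step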