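// adagrad_optimizer.cc -- Adagrad parameter update for the legacy
// paddle::optimizer library.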
#include <cmath>

#include "adagrad_optimizer.h"

namespace paddle {
namespace optimizer {

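// Binds the parameter tensor that update() modifies in place and allocates
// the per-element squared-gradient accumulator.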
void AdagradOptimizer::set_weight(Tensor* p) {
  parameter_ = p;  // update() dereferences parameter_, so store the pointer here
  size_t size = p->size();
  // Value-initialize so the accumulator starts at zero; a plain
  // `new real[size]` would leave the memory uninitialized.
  real* gptr = new real[size]();
  accum_gradient = new Tensor(gptr, size);
}

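// Adagrad: accumulate squared gradients per element and scale each step by
// the inverse square root of the accumulator, so frequently updated
// coordinates take progressively smaller steps:
//   G_i += g_i^2;   p_i -= lr * g_i / sqrt(G_i + epsilon) + lr * decay * p_i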
void AdagradOptimizer::update(const Tensor* gradient) {
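  // Count samples seen so the learning-rate policy can adjust the step size
  // as training progresses.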
  num_sample_passed += 1;
  double learning_rate = lr_policy->get_learning_rate(num_sample_passed);
  Tensor& param = *parameter_;
  const Tensor& grad = *gradient;
  Tensor& accum_g = *accum_gradient;
  for (size_t i = 0; i < param.size(); ++i) {
    // Accumulate the squared gradient, then take a descent step scaled by
    // the inverse root of the accumulator; the decay term applies L2 weight
    // decay. Note the subtraction: Adagrad steps against the gradient.
    accum_g[i] += grad[i] * grad[i];
    param[i] -= learning_rate * grad[i] / std::sqrt(accum_g[i] + epsilon) +
                learning_rate * decay * param[i];
  }
}

}  // namespace optimizer
}  // namespace paddle