#include <cmath>

#include "adagrad_optimizer.h"

namespace paddle {
namespace optimizer {

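// Binds the parameter tensor this optimizer updates and allocates a
// same-sized accumulator for the running sum of squared gradients.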
void AdagradOptimizer::set_weight(Tensor* p) {
  parameter_ = p;
  size_t size = p->size();
  // Value-initialize the buffer so the accumulator starts at zero;
  // a plain `new real[size]` would leave it uninitialized.
  real* gptr = new real[size]();
  accum_gradient_ = new Tensor(gptr, size);
}

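// One AdaGrad step (Duchi et al., 2011). For each element i:
//   accum_g[i] += grad[i]^2
//   param[i]   -= lr * grad[i] / sqrt(accum_g[i] + epsilon_)
//               + lr * decay_ * param[i]   // L2 weight decay
// The accumulator shrinks the effective step size for coordinates that
// have already received large gradients, while lr_policy_ supplies the
// base learning rate as a function of the number of samples seen.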
void AdagradOptimizer::Update(const Tensor* gradient) {
  num_sample_passed_ += 1;
  double learning_rate = lr_policy_->LearningRate(num_sample_passed_);
  Tensor& param = *parameter_;
  Tensor& accum_g = *accum_gradient_;
  const Tensor& grad = *gradient;
  for (size_t i = 0; i < param.size(); ++i) {
    accum_g[i] += grad[i] * grad[i];
    // Descend along the gradient with the adaptive step, then apply
    // L2 weight decay.
    param[i] -= learning_rate * grad[i] / std::sqrt(accum_g[i] + epsilon_) +
                learning_rate * decay_ * param[i];
  }
}

}  // namespace optimizer
}  // namespace paddle
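
// Illustrative stand-alone sketch, kept out of the build with #if 0: the
// same AdaGrad step on raw arrays, free of the Tensor/LrPolicy machinery.
// The function name and parameters below are hypothetical and not part of
// this library.
#if 0
#include <cmath>
#include <cstddef>

void adagrad_step(float* param, float* accum_g, const float* grad, size_t n,
                  float lr, float epsilon, float decay) {
  for (size_t i = 0; i < n; ++i) {
    accum_g[i] += grad[i] * grad[i];  // accumulate squared gradient
    param[i] -= lr * grad[i] / std::sqrt(accum_g[i] + epsilon) +
                lr * decay * param[i];  // adaptive step + L2 decay
  }
}
#endif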