adagrad_optimizer.cc
#include <cmath>

#include "adagrad_optimizer.h"

namespace paddle {
namespace optimizer {

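// Binds the parameter tensor to optimize and allocates a same-sized
// accumulator for the running sum of squared gradients. Note that any
// previously allocated accumulator is not freed here, so calling
// set_weight a second time leaks the old buffer.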
void AdagradOptimizer::set_weight(Tensor* p) {
  parameter_ = p;
  size_t size = p->size();
  accum_gradient_ = new Tensor(size);
}

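// Adagrad update: accumulate the squared gradient per element, then scale
// the step by the inverse square root of the accumulator so that
// frequently updated coordinates take smaller steps:
//   accum_i += grad_i^2
//   param_i -= lr * (grad_i / sqrt(accum_i + epsilon_) + decay_ * param_i)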
void AdagradOptimizer::Update(const Tensor* gradient) {
  num_sample_passed_ += 1;
  double learning_rate = lr_policy_->LearningRate(num_sample_passed_);
  Tensor& param = *parameter_;
  Tensor& accum_g = *accum_gradient_;
  const Tensor& grad = *gradient;
  for (size_t i = 0; i < param.size(); ++i) {
    accum_g[i] += grad[i] * grad[i];
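    // Step against the gradient, scaled by 1 / sqrt(accumulated squares);
    // the decay_ term applies L2 weight decay.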
    param[i] -= learning_rate * grad[i] / std::sqrt(accum_g[i] + epsilon_) +
                learning_rate * decay_ * param[i];
  }
}

}  // namespace optimizer
}  // namespace paddle