// adadelta_optimizer.cc
#include "adadelta_optimizer.h"
#include <algorithm>
#include <cmath>

namespace paddle {
namespace optimizer {

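// Binds the parameter tensor and allocates per-element optimizer state: the
// decayed average of squared gradients, the decayed average of squared
// updates, and a buffer for the current step's update.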
void AdadeltaOptimizer::set_weight(Tensor* p) {
  parameter_ = p;
  size_t size = p->size();
  accum_gradient_ = new Tensor(size);
  accum_delta_ = new Tensor(size);
  update_delta_ = new Tensor(size);
}

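// One AdaDelta step (Zeiler, 2012): keep decayed averages of squared
// gradients and squared updates, and scale each gradient by the ratio of
// their RMS values. Unlike the original formulation, this implementation
// also multiplies by a learning rate obtained from lr_policy_ and applies
// L2 weight decay through decay_.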
void AdadeltaOptimizer::Update(const Tensor* gradient) {
  num_sample_passed_ += 1;
  double learning_rate = lr_policy_->LearningRate(num_sample_passed_);
  Tensor& param = *parameter_;
  const Tensor& grad = *gradient;
  Tensor& accum_g = *accum_gradient_;
  Tensor& accum_d = *accum_delta_;
  Tensor& update_d = *update_delta_;
  for (size_t i = 0; i < param.size(); ++i) {
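    // E[g^2] <- rho * E[g^2] + (1 - rho) * g^2  (decayed average of squared gradients)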
    accum_g[i] = rho_ * accum_g[i] + (1.0 - rho_) * grad[i] * grad[i];

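    // Per-element step: RMS[delta x] / RMS[g] * g, with epsilon_ added inside
    // both square roots for numerical stability.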
    update_d[i] = std::sqrt(accum_d[i] + epsilon_) /
                  std::sqrt(accum_g[i] + epsilon_) * grad[i];

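    // E[dx^2] <- rho * E[dx^2] + (1 - rho) * dx^2  (decayed average of squared updates)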
    accum_d[i] = rho_ * accum_d[i] + (1.0 - rho_) * update_d[i] * update_d[i];

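    // Apply the step scaled by the learning rate, plus L2 weight decay
    // controlled by decay_.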
    param[i] -= learning_rate * update_d[i] + learning_rate * decay_ * param[i];
  }
}
}  // namespace optimizer
}  // namespace paddle