// adadelta_optimizer.cc
#include "adadelta_optimizer.h"
#include <algorithm>
#include <cmath>

namespace paddle {
namespace optimizer {

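// Attach the parameter tensor and allocate the optimizer state: accumulated
// squared gradients E[g^2], accumulated squared updates E[dx^2], and a
// scratch buffer for the current update.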
void AdadeltaOptimizer::set_weight(Tensor* p) {
  parameter_ = p;
  size_t size = p->size();
  real* gptr = new real[size]();  // () value-initializes the buffer to zero
  accum_gradient_ = new Tensor(gptr, size);
  real* dptr = new real[size]();
  accum_delta_ = new Tensor(dptr, size);
  real* dptr_current = new real[size]();
  update_delta_ = new Tensor(dptr_current, size);
}

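// One Adadelta step (Zeiler, 2012), applied element-wise below:
//   E[g^2]  <- rho * E[g^2]  + (1 - rho) * g^2
//   dx       = sqrt(E[dx^2] + eps) / sqrt(E[g^2] + eps) * g
//   E[dx^2] <- rho * E[dx^2] + (1 - rho) * dx^2
//   w       <- w - lr * (dx + decay * w)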
void AdadeltaOptimizer::Update(const Tensor* gradient) {
  num_sample_passed_ += 1;
  double learning_rate = lr_policy_->LearningRate(num_sample_passed_);
  Tensor& param = *parameter_;
  const Tensor& grad = *gradient;
  Tensor& accum_g = *accum_gradient_;
  Tensor& accum_d = *accum_delta_;
  Tensor& update_d = *update_delta_;
  for (size_t i = 0; i < param.size(); ++i) {
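    // Running average of squared gradients: E[g^2] <- rho * E[g^2] + (1 - rho) * g^2.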
    accum_g[i] = rho_ * accum_g[i] + (1.0 - rho_) * grad[i] * grad[i];

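    // Scale the gradient by the ratio of RMS(previous updates) to RMS(gradients).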
    update_d[i] = std::sqrt(accum_d[i] + epsilon_) /
                  std::sqrt(accum_g[i] + epsilon_) * grad[i];

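    // Running average of squared updates: E[dx^2] <- rho * E[dx^2] + (1 - rho) * dx^2.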
    accum_d[i] = rho_ * accum_d[i] + (1.0 - rho_) * update_d[i] * update_d[i];

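    // Apply the update plus L2 weight decay, both scaled by the learning rate.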
    param[i] -= learning_rate * update_d[i] + learning_rate * decay_ * param[i];
  }
}
}  // namespace optimizer
}  // namespace paddle