// adagrad_optimizer.cc
#include <cmath>

#include "adagrad_optimizer.h"

namespace paddle {
namespace optimizer {

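// Applies one AdaGrad step to the wrapped parameter. AdaGrad keeps a running
// per-element sum of squared gradients and scales each element's step by the
// inverse square root of that sum, so frequently-updated coordinates take
// progressively smaller steps. epsilon_ guards against division by zero, and
// decay_ applies L2 weight decay. The learning rate is queried from the
// attached LrPolicy using the number of samples seen so far.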
void AdagradOptimizer::Update(const Tensor* gradient) {
  num_sample_passed_ += 1;
  double learning_rate = lr_policy_->LearningRate(num_sample_passed_);
  Tensor& param = *parameter_;
  Tensor& accum_g = *accum_gradient_;
  const Tensor& grad = *gradient;
  for (size_t i = 0; i < param.size(); ++i) {
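    // Accumulate the squared gradient; this running per-element sum is the
    // state that adapts AdaGrad's step size.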
    accum_g[i] += grad[i] * grad[i];
    // Descend: scale each element's step by the inverse square root of its
    // accumulated squared-gradient sum; the second term applies L2 weight
    // decay.
    param[i] -= learning_rate * grad[i] / std::sqrt(accum_g[i] + epsilon_) +
                learning_rate * decay_ * param[i];
  }
}
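
// State (de)serialization is left unimplemented; NIMPL is assumed here to be
// a "not implemented" macro defined elsewhere in this codebase.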
const char* AdagradOptimizer::SerializeState(int* state_len) { NIMPL; }

void AdagradOptimizer::DeSerializeState(const std::string& str) { NIMPL; }
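
// A minimal usage sketch. Hedged: the constructor signature below is an
// assumption inferred from the members Update() touches, not taken from
// adagrad_optimizer.h; consult the header for the actual interface.
//
//   Tensor param = ...;                        // parameter to optimize
//   Tensor grad = ...;                         // gradient of the same shape
//   AdagradOptimizer opt(&param, lr_policy, epsilon, decay);
//   opt.Update(&grad);
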
}  // namespace optimizer
}  // namespace paddle