lr_policy.h
#pragma once

#include <algorithm>
#include <cstdint>
#include "OptimizerConfig.pb.h"

namespace paddle {
namespace optimizer {

// Abstract interface for learning rate schedules: a policy maps the number
// of samples processed so far to the learning rate for the next update.
class LrPolicy {
public:
  virtual ~LrPolicy() {}
  virtual double LearningRate(const uint64_t num_sample_passed) = 0;
  virtual void set(double current_learning_rate) = 0;
};
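
// Illustrative usage sketch (an assumption, not part of the original header):
// an optimizer would typically own a policy through the base class and query
// it once per update step. The names `policy` and `num_sample_passed` below
// are hypothetical.
//
//   LrPolicy *policy = new ConstLr(0.01);
//   double lr = policy->LearningRate(num_sample_passed);
//   policy->set(0.005);  // manually override the current rate
//   delete policy;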

// Constant learning rate policy: always returns the stored rate.
class ConstLr final : public LrPolicy {
public:
  explicit ConstLr(double lr) : learning_rate(lr) {}
  double LearningRate(const uint64_t num_sample_passed) override {
    return learning_rate;
  }
  void set(double current_learning_rate) override {
    learning_rate = current_learning_rate;
  }

private:
  double learning_rate;
};

// Linearly decaying learning rate policy: the rate drops by lr_decay_a per
// sample processed, clamped below at the floor lr_decay_b.
class LinearLr final : public LrPolicy {
public:
  LinearLr(double lr, double lr_decay_a, double lr_decay_b)
      : learning_rate(lr), lr_decay_a(lr_decay_a), lr_decay_b(lr_decay_b) {}
  double LearningRate(const uint64_t num_sample_passed) override {
    return std::max(learning_rate - lr_decay_a * num_sample_passed, lr_decay_b);
  }
  void set(double current_learning_rate) override {
    learning_rate = current_learning_rate;
  }

private:
  double learning_rate;
  double lr_decay_a;
  double lr_decay_b;
};
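
// Worked example with assumed numbers (illustrative, not from the library):
// LinearLr(0.1, 1e-6, 0.01) yields, after 50000 samples,
//   max(0.1 - 1e-6 * 50000, 0.01) = max(0.05, 0.01) = 0.05,
// and after 200000 samples the rate clamps to the floor lr_decay_b = 0.01.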

}  // namespace optimizer
}  // namespace paddle