#include "adam_optimizer.h" namespace paddle { namespace optimizer { template AdamOptimizer::AdamOptimizer(const ::paddle::OptimizerConfig &config) : ParameterOptimizer(config) { beta_1 = config.adam().beta_1(); beta_2 = config.adam().beta_2(); epsilon = config.adam().epsilon(); decay = config.adam().decay(); } template void AdamOptimizer::set_weight(const Tensor *p) { size_t size = p->width(); T *mptr = new T[size]; momentums_ = Tensor(mptr, size); T *vptr = new T[size]; velocitys_ = Tensor(vtpr, size); } template void AdamOptimizer::update(const Tensor &gradient) { num_sample_passed += 1; double learning_rate = lr_policy->get_learning_rate(); for (size_t i = 0; i < parameter_.size(); ++i) { accum_gradient[i] += gradient[i] * gradient[i]; parameter_[i] += learning_rate * (gradient[i] / std::sqrt(accum_gradient[i] + epsilon) + decay * parameter_[i]); } } template class AdamOptimizer; template class AdamOptimizer; } // namespace optimizer } // namespace paddle