/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <math.h>  // for sqrt in CPU and CUDA
#include <Eigen/Dense>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {

namespace scatter = paddle::operators::math::scatter;

// Tag types used to select the dense Adam implementation: GPUAdam updates one
// element per invocation (driven by ForRange), CPUAdam updates the whole
// tensor at once with vectorized Eigen expressions.
struct GPUAdam;
struct CPUAdam;

template <typename T, typename Flavour>
struct AdamFunctor;

template <typename T>
struct AdamFunctor<T, GPUAdam> {
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

  inline HOSTDEVICE void operator()(size_t i) const {
    // Merge all memory access together.
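    // For reference, the update below is the standard Adam rule
    // (Kingma & Ba, "Adam: A Method for Stochastic Optimization"), with the
    // bias correction folded into the learning rate:
    //
    //   m_t  = beta1 * m_{t-1} + (1 - beta1) * g_t
    //   v_t  = beta2 * v_{t-1} + (1 - beta2) * g_t^2
    //   lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
    //   p_t  = p_{t-1} - lr_t * m_t / (sqrt(v_t) + epsilon)
    //
    // beta1_pow_ and beta2_pow_ are expected to hold beta1^t and beta2^t,
    // accumulated by the surrounding training program.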
    T g = grad_[i];
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);
    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }
};

template <typename T>
struct AdamFunctor<T, CPUAdam> {
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

  void operator()(size_t numel) const {
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{
        grad_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{
        moment1_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{
        moment2_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{
        param_, static_cast<Eigen::Index>(numel)};

    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{
        param_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{
        moment1_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{
        moment2_out_, static_cast<Eigen::Index>(numel)};

    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    moment1_out = beta1_ * mom1 + (1 - beta1_) * g;
    moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g;
    param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_));
  }
};

template <typename T>
struct SparseAdamFunctor {
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  const int64_t* rows_;
  int64_t row_numel_;
  int64_t row_count_;

  SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                    const T* beta2_pow, const T* mom1, T* mom1_out,
                    const T* mom2, T* mom2_out, const T* lr, const T* grad,
                    const T* param, T* param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out),
        rows_(rows),
        row_numel_(row_numel),
        row_count_(row_count) {}

  // Returns the position of `row` in rows_, or -1 if the row has no gradient.
  // rows_ must be sorted in strictly increasing order.
  inline HOSTDEVICE int64_t BinarySearchInRows(int64_t row) const {
    int64_t beg = 0, end = row_count_ - 1;
    while (beg <= end) {
      auto mid = ((beg + end) >> 1);
      if (rows_[mid] == row)
        return mid;
      else if (rows_[mid] < row)
        beg = mid + 1;
      else
        end = mid - 1;
    }
    return -1;
  }
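  // Sketch of the sparse gradient layout this functor assumes (values are
  // illustrative only): with rows_ = {0, 4, 7} and row_numel_ = 3, grad_
  // stores just the three non-empty rows back to back, so parameter element
  // i = 13 (row 4, column 1) reads grad_[1 * 3 + 1]. Rows without a gradient
  // contribute g = 0 and still have their moments decayed below.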
  inline HOSTDEVICE void operator()(size_t i) const {
    int64_t row = i / row_numel_;
    auto row_idx = BinarySearchInRows(row);
    T g = row_idx >= 0 ? grad_[row_idx * row_numel_ + i % row_numel_] : 0;

    // The following code is the same as dense
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);
    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }
};

template <typename DeviceContext, typename T>
class AdamOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    using paddle::framework::LoDTensor;
    using paddle::operators::detail::Ref;

    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
    auto& param = Ref(ctx.Input<LoDTensor>("Param"), "Must set Param");
    // auto& grad = Ref(ctx.Input<LoDTensor>("Grad"), "Must set Grad");
    auto* grad_var = ctx.InputVar("Grad");
    auto& mom1 = Ref(ctx.Input<LoDTensor>("Moment1"), "Must set Moment1");
    auto& mom2 = Ref(ctx.Input<LoDTensor>("Moment2"), "Must set Moment2");
    auto& lr =
        Ref(ctx.Input<LoDTensor>("LearningRate"), "Must set LearningRate");

    auto& beta1_pow =
        Ref(ctx.Input<LoDTensor>("Beta1Pow"), "Must set Beta1Pow");
    auto& beta2_pow =
        Ref(ctx.Input<LoDTensor>("Beta2Pow"), "Must set Beta2Pow");

    auto& param_out =
        Ref(ctx.Output<LoDTensor>("ParamOut"), "Must set ParamOut");
    auto& mom1_out =
        Ref(ctx.Output<LoDTensor>("Moment1Out"), "Must set Moment1Out");
    auto& mom2_out =
        Ref(ctx.Output<LoDTensor>("Moment2Out"), "Must set Moment2Out");

    if (grad_var->IsType<framework::LoDTensor>()) {
      auto& grad = Ref(ctx.Input<LoDTensor>("Grad"), "Must set Grad");

      if (platform::is_cpu_place(ctx.GetPlace())) {
        AdamFunctor<T, CPUAdam> functor(
            beta1, beta2, epsilon, beta1_pow.template data<T>(),
            beta2_pow.template data<T>(), mom1.template data<T>(),
            mom1_out.template mutable_data<T>(ctx.GetPlace()),
            mom2.template data<T>(),
            mom2_out.template mutable_data<T>(ctx.GetPlace()),
            lr.template data<T>(), grad.template data<T>(),
            param.template data<T>(),
            param_out.template mutable_data<T>(ctx.GetPlace()));
        functor(param.numel());
      } else if (platform::is_gpu_place(ctx.GetPlace())) {
        AdamFunctor<T, GPUAdam> functor(
            beta1, beta2, epsilon, beta1_pow.template data<T>(),
            beta2_pow.template data<T>(), mom1.template data<T>(),
            mom1_out.template mutable_data<T>(ctx.GetPlace()),
            mom2.template data<T>(),
            mom2_out.template mutable_data<T>(ctx.GetPlace()),
            lr.template data<T>(), grad.template data<T>(),
            param.template data<T>(),
            param_out.template mutable_data<T>(ctx.GetPlace()));

        platform::ForRange<DeviceContext> for_range(
            static_cast<const DeviceContext&>(ctx.device_context()),
            param.numel());
        for_range(functor);
      }
    } else if (grad_var->IsType<framework::SelectedRows>()) {
      auto& grad =
          Ref(ctx.Input<framework::SelectedRows>("Grad"), "Must set Grad");
      if (grad.rows().size() == 0) {
        VLOG(3) << "grad row size is 0!!";
        return;
      }

      std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
      bool is_strict_sorted = true;
      for (size_t i = 1; i < cpu_rows.size(); ++i) {
        if (cpu_rows[i - 1] >= cpu_rows[i]) {
          is_strict_sorted = false;
          break;
        }
      }
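      // The sparse functor's binary search expects sorted, de-duplicated
      // rows, so a gradient whose rows are already strictly increasing can be
      // used as-is; otherwise the rows are merged first via MergeAdd below.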
      const framework::SelectedRows* grad_merge_ptr;
      if (is_strict_sorted) {
        grad_merge_ptr = &grad;
      } else {
        // merge duplicated rows if any.
        // The rows of grad_merge have been sorted inside MergeAdd functor
        scatter::MergeAdd<DeviceContext, T> merge_func;
        auto* grad_merge_var = const_cast<framework::Scope&>(ctx.scope())
                                   .Var()
                                   ->GetMutable<framework::SelectedRows>();
        merge_func(ctx.template device_context<DeviceContext>(), grad,
                   grad_merge_var);
        grad_merge_ptr = grad_merge_var;
        VLOG(3) << "Create new variables in adam_op";
      }

      auto& grad_merge = *grad_merge_ptr;
      auto& grad_tensor = grad_merge.value();
      const T* grad_data = grad_tensor.template data<T>();
      const int64_t* rows = nullptr;
// When compiled without CUDA, the CUDAData() interface should not be
// provided.
#if defined(PADDLE_WITH_CUDA)
      if (platform::is_gpu_place(ctx.GetPlace())) {
        rows = grad_merge.rows().CUDAData(ctx.GetPlace());
      } else {
#endif
        rows = grad_merge.rows().data();
#if defined(PADDLE_WITH_CUDA)
      }
#endif
      auto row_numel = grad_tensor.numel() / grad_merge.rows().size();

      SparseAdamFunctor<T> functor(
          beta1, beta2, epsilon, beta1_pow.template data<T>(),
          beta2_pow.template data<T>(), mom1.template data<T>(),
          mom1_out.template mutable_data<T>(ctx.GetPlace()),
          mom2.template data<T>(),
          mom2_out.template mutable_data<T>(ctx.GetPlace()),
          lr.template data<T>(), grad_data, param.template data<T>(),
          param_out.template mutable_data<T>(ctx.GetPlace()), rows, row_numel,
          grad_merge.rows().size());
      platform::ForRange<DeviceContext> for_range(
          static_cast<const DeviceContext&>(ctx.device_context()),
          param.numel());
      for_range(functor);
    } else {
      PADDLE_THROW("Variable type not supported by adam_op");
    }
  }
};

}  // namespace operators
}  // namespace paddle
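
// Illustrative only: this header is consumed by companion adam_op.cc /
// adam_op.cu files, which instantiate and register the kernel roughly as
// sketched below. The exact registrations live in those files; the lines here
// are an assumption based on the usual PaddlePaddle operator layout, not part
// of this header.
//
//   namespace ops = paddle::operators;
//   REGISTER_OP_CPU_KERNEL(
//       adam, ops::AdamOpKernel<paddle::platform::CPUDeviceContext, float>,
//       ops::AdamOpKernel<paddle::platform::CPUDeviceContext, double>);
//
//   // and, in adam_op.cu:
//   REGISTER_OP_CUDA_KERNEL(
//       adam, ops::AdamOpKernel<paddle::platform::CUDADeviceContext, float>,
//       ops::AdamOpKernel<paddle::platform::CUDADeviceContext, double>);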