/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/optimizers/adam_op.h"
#include "gflags/gflags.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

#ifdef PADDLE_WITH_XPU
template <typename DeviceContext, typename T>
class AdamOpXPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const auto* param_var = ctx.InputVar("Param");
    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
                      platform::errors::InvalidArgument(
                          "Tensor holds the wrong type. Expected Var(%s)'s "
                          "type to be LoDTensor, but received %s.",
                          ctx.InputNames("Param").front(),
                          framework::ToTypeName(param_var->Type())));
    using paddle::framework::LoDTensor;

    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
    auto& param = GET_DATA_SAFELY(ctx.Input<LoDTensor>("Param"), "Input",
                                  "Param", "Adam");
    // auto& grad = Ref(ctx.Input<LoDTensor>("Grad"), "Must set Grad");
    auto* grad_var = ctx.InputVar("Grad");
    auto& mom1 = GET_DATA_SAFELY(ctx.Input<LoDTensor>("Moment1"), "Input",
                                 "Moment1", "Adam");
    auto& mom2 = GET_DATA_SAFELY(ctx.Input<LoDTensor>("Moment2"), "Input",
                                 "Moment2", "Adam");
    auto& lr = GET_DATA_SAFELY(ctx.Input<LoDTensor>("LearningRate"), "Input",
                               "LearningRate", "Adam");
    auto& beta1_pow = GET_DATA_SAFELY(ctx.Input<LoDTensor>("Beta1Pow"),
                                      "Input", "Beta1Pow", "Adam");
    auto& beta2_pow = GET_DATA_SAFELY(ctx.Input<LoDTensor>("Beta2Pow"),
                                      "Input", "Beta2Pow", "Adam");

    auto& param_out = GET_DATA_SAFELY(ctx.Output<LoDTensor>("ParamOut"),
                                      "Output", "ParamOut", "Adam");
    auto& mom1_out = GET_DATA_SAFELY(ctx.Output<LoDTensor>("Moment1Out"),
                                     "Output", "Moment1Out", "Adam");
    auto& mom2_out = GET_DATA_SAFELY(ctx.Output<LoDTensor>("Moment2Out"),
                                     "Output", "Moment2Out", "Adam");

    auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
    auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");

    PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "Tensor holds the wrong size. Expected beta1 pow "
                          "output size to be 1, but received %d.",
                          beta1_pow_out->numel()));

    PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "Tensor holds the wrong size. Expected beta2 pow "
                          "output size to be 1, but received %d.",
                          beta2_pow_out->numel()));

    // beta1/beta2 may arrive either as attributes or as 1-element tensors;
    // the tensor inputs take precedence when present.
    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    if (ctx.HasInput("Beta1Tensor")) {
      auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
      beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor));
    }
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    if (ctx.HasInput("Beta2Tensor")) {
      auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
      beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor));
    }
    if (grad_var->IsType<framework::LoDTensor>()) {
      auto& grad = GET_DATA_SAFELY(ctx.Input<LoDTensor>("Grad"), "Input",
                                   "Grad", "Adam");

      auto& dev_ctx = ctx.template device_context<DeviceContext>();
      const T* beta1_pow_ptr = beta1_pow.template data<T>();
      const T* beta2_pow_ptr = beta2_pow.template data<T>();
      Tensor xpu_beta1_pow;
      Tensor xpu_beta2_pow;
      // The beta pow accumulators may live on the host; stage them on the
      // XPU so the device kernel can read them.
      if (beta1_pow.place() == platform::CPUPlace() &&
          beta2_pow.place() == platform::CPUPlace()) {
        TensorCopy(beta1_pow, ctx.GetPlace(), dev_ctx, &xpu_beta1_pow);
        TensorCopy(beta2_pow, ctx.GetPlace(), dev_ctx, &xpu_beta2_pow);
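        // Note: the two TensorCopy calls above run asynchronously on the
        // device context, so a synchronization point is required before the
        // staged values can be handed to the kernel; the Wait() below
        // provides it.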
        dev_ctx.Wait();
        beta1_pow_ptr = xpu_beta1_pow.template data<T>();
        beta2_pow_ptr = xpu_beta2_pow.template data<T>();
      }

      int r = xpu::adam(
          dev_ctx.x_context(), grad.template data<T>(),
          mom1.template data<T>(), mom2.template data<T>(),
          param.template data<T>(), beta1_pow_ptr, beta2_pow_ptr, beta1, beta2,
          epsilon, lr.template data<T>(),
          mom1_out.template mutable_data<T>(ctx.GetPlace()),
          mom2_out.template mutable_data<T>(ctx.GetPlace()),
          param_out.template mutable_data<T>(ctx.GetPlace()), param.numel());

      // Advance the beta pow accumulators on the CPU and, when the outputs
      // live on the XPU, copy the updated values back to the device.
      if (beta1_pow.place() == platform::CPUPlace() &&
          beta2_pow.place() == platform::CPUPlace()) {
        const T* beta1_pow_p = beta1_pow.template data<T>();
        beta1_pow_out->mutable_data<T>(platform::CPUPlace())[0] =
            beta1 * beta1_pow_p[0];
        const T* beta2_pow_p = beta2_pow.template data<T>();
        beta2_pow_out->mutable_data<T>(platform::CPUPlace())[0] =
            beta2 * beta2_pow_p[0];
      } else {
        T cpu_beta1_pow_out_data;
        T cpu_beta2_pow_out_data;

        memory::Copy(platform::CPUPlace(), &cpu_beta1_pow_out_data,
                     BOOST_GET_CONST(platform::XPUPlace, beta1_pow.place()),
                     beta1_pow_ptr, sizeof(T));
        cpu_beta1_pow_out_data = cpu_beta1_pow_out_data * beta1;

        memory::Copy(platform::CPUPlace(), &cpu_beta2_pow_out_data,
                     BOOST_GET_CONST(platform::XPUPlace, beta2_pow.place()),
                     beta2_pow_ptr, sizeof(T));
        cpu_beta2_pow_out_data = cpu_beta2_pow_out_data * beta2;

        T* beta1_pow_out_p = beta1_pow_out->mutable_data<T>(ctx.GetPlace());
        T* beta2_pow_out_p = beta2_pow_out->mutable_data<T>(ctx.GetPlace());
        memory::Copy(BOOST_GET_CONST(platform::XPUPlace, ctx.GetPlace()),
                     beta1_pow_out_p, platform::CPUPlace(),
                     &cpu_beta1_pow_out_data, sizeof(T));
        memory::Copy(BOOST_GET_CONST(platform::XPUPlace, ctx.GetPlace()),
                     beta2_pow_out_p, platform::CPUPlace(),
                     &cpu_beta2_pow_out_data, sizeof(T));
      }

      PADDLE_ENFORCE_EQ(r == xpu::Error_t::SUCCESS, true,
                        platform::errors::External(
                            "XPU API returned wrong value[%d], please check "
                            "whether the Baidu Kunlun card is properly "
                            "installed.",
                            r));
    } else {
      PADDLE_ENFORCE_EQ(1, 2, platform::errors::InvalidArgument(
                                  "Variable type not supported by adam_op"));
    }
  }
};
#endif

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
#ifdef PADDLE_WITH_XPU
REGISTER_OP_XPU_KERNEL(
    adam,
    ops::AdamOpXPUKernel<paddle::platform::XPUDeviceContext, float>);
#endif
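// For reference, a standard Adam step (Kingma & Ba, "Adam: A Method for
// Stochastic Optimization", ICLR 2015), which the xpu::adam call above is
// expected to implement element-wise; the exact placement of epsilon may
// differ in the device implementation:
//
//   moment1_out = beta1 * moment1 + (1 - beta1) * grad
//   moment2_out = beta2 * moment2 + (1 - beta2) * grad * grad
//   lr_t        = lr * sqrt(1 - beta2_pow) / (1 - beta1_pow)
//   param_out   = param - lr_t * moment1_out / (sqrt(moment2_out) + epsilon)
//
// Beta1Pow / Beta2Pow carry beta1^t and beta2^t across steps; they are
// advanced once per step by the host-side multiplies in Compute above.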