/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include <string>

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/npu_op_runner.h"
#include "paddle/fluid/operators/optimizers/adam_op.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;

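// Adam optimizer kernel for the Ascend NPU. The per-parameter update is
// delegated to the fused "ApplyAdamD" NPU operator; the beta1^t / beta2^t
// accumulators are advanced separately below, since ApplyAdamD does not
// write them back.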
template <typename DeviceContext, typename T>
class AdamNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const auto* param_var = ctx.InputVar("Param");
    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
                      platform::errors::InvalidArgument(
                          "The Var(%s)'s type should be LoDTensor, "
                          "but the received is %s",
                          ctx.InputNames("Param").front(),
                          framework::ToTypeName(param_var->Type())));
    auto* param = ctx.Input<LoDTensor>("Param");
    auto* grad_var = ctx.InputVar("Grad");
    PADDLE_ENFORCE_EQ(grad_var->IsType<framework::LoDTensor>(), true,
                      platform::errors::InvalidArgument(
                          "The Grad(%s)'s type should be LoDTensor, "
                          "but the received is %s",
                          ctx.InputNames("Grad").front(),
                          framework::ToTypeName(grad_var->Type())));
    auto* grad = ctx.Input<LoDTensor>("Grad");
    auto* mom1 = ctx.Input<LoDTensor>("Moment1");
    auto* mom2 = ctx.Input<LoDTensor>("Moment2");
    auto* lr = ctx.Input<LoDTensor>("LearningRate");

    auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
    auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");

    auto* param_out = ctx.Output<LoDTensor>("ParamOut");
    auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
    auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
    auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
    auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");

    param_out->mutable_data<T>(ctx.GetPlace());
    mom1_out->mutable_data<T>(ctx.GetPlace());
    mom2_out->mutable_data<T>(ctx.GetPlace());

    // NOTE(zhiqiu): beta1_pow and beta2_pow may be on the CPU and are not
    // transformed to the NPU place automatically, so copy their scalar
    // values over by hand.
    if (beta1_pow->place() == platform::CPUPlace()) {
      T beta1 = *beta1_pow->data<T>();
      // `mutable_data` must be called after reading the old value, since
      // beta1_pow_out may share its buffer with beta1_pow.
      beta1_pow_out->mutable_data<T>(ctx.GetPlace());
      FillNpuTensorWithConstant<T>(beta1_pow_out, beta1);
    } else {
      beta1_pow_out->mutable_data<T>(ctx.GetPlace());
    }
    if (beta2_pow->place() == platform::CPUPlace()) {
      T beta2 = *beta2_pow->data<T>();
      beta2_pow_out->mutable_data<T>(ctx.GetPlace());
      FillNpuTensorWithConstant<T>(beta2_pow_out, beta2);
    } else {
      beta2_pow_out->mutable_data<T>(ctx.GetPlace());
    }

    const Tensor* beta1_tensor = nullptr;
    const Tensor* beta2_tensor = nullptr;
    const Tensor* epsilon_tensor = nullptr;

    Tensor beta1_tmp(framework::proto::VarType::FP32);
    Tensor beta2_tmp(framework::proto::VarType::FP32);
    Tensor epsilon_tmp(framework::proto::VarType::FP32);

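    // beta1 / beta2 / epsilon may arrive either as optional 1-element tensor
    // inputs or as float attributes; attribute values are materialized as
    // 1-element NPU tensors so they can be fed to ApplyAdamD.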
    if (ctx.HasInput("Beta1Tensor")) {
      beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
      PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta1Tensor) size must be 1, but get %d",
                            beta1_tensor->numel()));
    } else {
      T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
      beta1_tmp.mutable_data<T>({1}, ctx.GetPlace());
      FillNpuTensorWithConstant<T>(&beta1_tmp, beta1);
      beta1_tensor = &beta1_tmp;
    }

    if (ctx.HasInput("Beta2Tensor")) {
      beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
      PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta2Tensor) size must be 1, but get %d",
                            beta2_tensor->numel()));
    } else {
      T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
      beta2_tmp.mutable_data<T>({1}, ctx.GetPlace());
      FillNpuTensorWithConstant<T>(&beta2_tmp, beta2);
      beta2_tensor = &beta2_tmp;
    }

    if (ctx.HasInput("EpsilonTensor")) {
      epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor");
      PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(EpsilonTensor) size must be 1, but get %d",
                            epsilon_tensor->numel()));
    } else {
      T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
      epsilon_tmp.mutable_data<T>({1}, ctx.GetPlace());
      FillNpuTensorWithConstant<T>(&epsilon_tmp, epsilon);
      epsilon_tensor = &epsilon_tmp;
    }

    VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
            << ", beta2_pow.numel() : " << beta2_pow->numel();
    VLOG(3) << "param.numel(): " << param->numel();

    PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta1 pow output size should be 1, but received "
                          "value is: %d.",
                          beta1_pow_out->numel()));

    PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta2 pow output size should be 1, but received "
                          "value is: %d.",
                          beta2_pow_out->numel()));
    auto stream =
        ctx.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();
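    // ApplyAdamD performs the standard Adam update in one fused NPU op
    // (semantics matching TF's ApplyAdam, as far as the inputs suggest):
    //   m_t = beta1 * m + (1 - beta1) * grad
    //   v_t = beta2 * v + (1 - beta2) * grad^2
    //   param -= lr * sqrt(1 - beta2_pow) / (1 - beta1_pow)
    //            * m_t / (sqrt(v_t) + epsilon)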
    auto runner =
        NpuOpRunner("ApplyAdamD",
                    {
                        *param, *mom1, *mom2, *beta1_pow, *beta2_pow, *lr,
                        *beta1_tensor, *beta2_tensor, *epsilon_tensor, *grad,
                    },
                    {
                        *param_out, *mom1_out, *mom2_out,
                    },
                    {});
    runner.Run(stream);

    // NOTE(zhiqiu): ApplyAdamD updates params in place, so
    // if param and param_out are not the same, we need to do a copy.
    if (param_out->data<T>() != param->data<T>()) {
      framework::TensorCopy(
          *param, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), param_out);
    }
    if (mom1_out->data<T>() != mom1->data<T>()) {
      framework::TensorCopy(
          *mom1, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), mom1_out);
    }
    if (mom2_out->data<T>() != mom2->data<T>()) {
      framework::TensorCopy(
          *mom2, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), mom2_out);
    }
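    // Advance the bias-correction accumulators on the NPU:
    // beta1_pow_out = beta1_pow * beta1, beta2_pow_out = beta2_pow * beta2.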
    auto runner_m1 =
        NpuOpRunner("Mul", {*beta1_pow, *beta1_tensor}, {*beta1_pow_out}, {});
    runner_m1.Run(stream);
    auto runner_m2 =
        NpuOpRunner("Mul", {*beta2_pow, *beta2_tensor}, {*beta2_pow_out}, {});
    runner_m2.Run(stream);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

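// Register the NPU Adam kernel for float and float16 parameters.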
REGISTER_OP_NPU_KERNEL(
    adam, ops::AdamNPUKernel<paddle::platform::NPUDeviceContext, float>,
    ops::AdamNPUKernel<paddle::platform::NPUDeviceContext,
                       paddle::platform::float16>);