/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_XPU
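// This kernel is compiled only when PaddlePaddle is built with XPU support.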

#include <gflags/gflags.h>

#include <iostream>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/device_wrapper.h"

namespace paddle {
namespace operators {

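// Reads a scalar float "attribute" that is supplied as a 1-element tensor.
// If the tensor lives on a GPU/XPU device, it is first copied synchronously
// to host memory, since device memory cannot be dereferenced directly.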
static inline float GetAttrFromTensor(const framework::Tensor* tensor) {
  const float* tensor_data = tensor->data<float>();
  framework::Tensor cpu_tensor;
  if (platform::is_gpu_place(tensor->place()) ||
      platform::is_xpu_place(tensor->place())) {
    paddle::framework::TensorCopySync(
        *tensor, platform::CPUPlace(), &cpu_tensor);
    tensor_data = cpu_tensor.data<float>();
  }
  return tensor_data[0];
}

using framework::OpKernelType;
using framework::Tensor;

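// XPU implementation of the RMSProp optimizer update: reads Param, Grad,
// MeanSquare and Moment, and writes ParamOut, MeanSquareOut and MomentOut
// through a single fused XDNN call.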
template <typename DeviceContext, typename T>
class RmspropOpXPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    using paddle::framework::LoDTensor;

    // check Param & Grad tensor type
    const auto* param_var = ctx.InputVar("Param");
    PADDLE_ENFORCE_EQ(param_var->IsType<LoDTensor>(),
                      true,
                      platform::errors::InvalidArgument(
                          "Tensor holds the wrong type. Expected Var(%s)'s "
                          "type to be LoDTensor, but the received type is %s.",
                          ctx.InputNames("Param").front(),
                          framework::ToTypeName(param_var->Type())));

    const auto* grad_var = ctx.InputVar("Grad");
    PADDLE_ENFORCE_EQ(grad_var->IsType<LoDTensor>(),
                      true,
                      platform::errors::InvalidArgument(
                          "Tensor holds the wrong type. Expected Var(%s)'s "
                          "type to be LoDTensor, but the received type is %s.",
                          ctx.InputNames("Grad").front(),
                          framework::ToTypeName(grad_var->Type())));

    // inputs
    auto& param = GET_DATA_SAFELY(
        ctx.Input<LoDTensor>("Param"), "Input", "Param", "Rmsprop");
    auto& mean_square = GET_DATA_SAFELY(
        ctx.Input<LoDTensor>("MeanSquare"), "Input", "MeanSquare", "Rmsprop");
    auto& grad = GET_DATA_SAFELY(
        ctx.Input<LoDTensor>("Grad"), "Input", "Grad", "Rmsprop");
    auto& mom = GET_DATA_SAFELY(
        ctx.Input<LoDTensor>("Moment"), "Input", "Moment", "Rmsprop");

    auto* learning_rate = ctx.Input<Tensor>("LearningRate");
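    // The learning rate is supplied as a 1-D tensor holding a single value.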
    PADDLE_ENFORCE_EQ(learning_rate->dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "Learning rate should have dimension = 1, "
                          "but received learning rate dim [%s].",
                          learning_rate->dims().size()));
    T lr = static_cast<T>(GetAttrFromTensor(learning_rate));

    // constants
    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
    T decay = static_cast<T>(ctx.Attr<float>("decay"));
    T momentum = static_cast<T>(ctx.Attr<float>("momentum"));
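    // Note: the "decay" attribute plays the role of rho (the moving-average
    // coefficient for the squared gradients) in the update rule documented
    // below.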

    // outputs
    auto& param_out = GET_DATA_SAFELY(
        ctx.Output<LoDTensor>("ParamOut"), "Output", "ParamOut", "Rmsprop");
    auto& mom_out = GET_DATA_SAFELY(
        ctx.Output<LoDTensor>("MomentOut"), "Output", "MomentOut", "Rmsprop");
    auto& mean_square_out = GET_DATA_SAFELY(
        ctx.Output<LoDTensor>("MeanSquareOut"),
        "Output",
        "MeanSquareOut",
        "Rmsprop");
    auto& dev_ctx = ctx.template device_context<DeviceContext>();

    /// RMSProp update rule:
    ///
    /// ms_out[i] = rho * ms[i] + (1 - rho) * (g[i] * g[i]);
    ///
    /// mom_out[i] = momentum * mom[i] + lr *
    /// (g[i] / ((float)sqrt(ms_out[i] + epsilon)));
    ///
    /// p_out[i] = p[i] - mom_out[i];
    ///
    /// Reference signature (note: the xpu::rmsprop call below passes its
    /// arguments in a different order than this documented interface):
    /// DLL_EXPORT int rmsprop(Context* ctx, const float* p,
    /// const float* ms, const float* g, const float* mom,
    /// float epsilon, float rho, float momentum, float lr,
    /// float *ms_out, float *mom_out, float *p_out, int n)
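    ///
    /// For intuition, a minimal host-side sketch of the same elementwise
    /// update (illustrative only; this kernel runs the fused XDNN version):
    ///
    ///   for (int i = 0; i < n; ++i) {
    ///     ms_out[i] = rho * ms[i] + (1.0f - rho) * g[i] * g[i];
    ///     mom_out[i] = momentum * mom[i]
    ///                + lr * g[i] / std::sqrt(ms_out[i] + epsilon);
    ///     p_out[i] = p[i] - mom_out[i];
    ///   }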
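    // Launch the fused RMSProp update on the XPU. xpu::rmsprop returns an
    // XDNN status code, which PADDLE_ENFORCE_XDNN_SUCCESS validates below.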
    int r = xpu::rmsprop(dev_ctx.x_context(),
                         grad.template data<T>(),
                         param.template data<T>(),
                         mean_square.template data<T>(),
                         mom.template data<T>(),
                         param_out.template mutable_data<T>(ctx.GetPlace()),
                         mean_square_out.template mutable_data<T>(ctx.GetPlace()),
                         mom_out.template mutable_data<T>(ctx.GetPlace()),
                         epsilon,
                         decay,
                         momentum,
                         lr,
                         param.numel());

    PADDLE_ENFORCE_XDNN_SUCCESS(r, "rmsprop");
  }
};

}  // namespace operators
}  // namespace paddle

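// Register the RMSProp XPU kernel; only float is registered for this op.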
namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(
    rmsprop,
    ops::RmspropOpXPUKernel<paddle::platform::XPUDeviceContext, float>);
#endif