/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef PADDLE_WITH_HIP
// HIP does not support cuSOLVER

#include <thrust/device_vector.h>
#include <algorithm>
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/qr_op.h"
#include "paddle/fluid/platform/dynload/cusolver.h"

// Reuse some helper functions from svd
#include "paddle/fluid/operators/svd_helper.h"

namespace paddle {
namespace operators {

template <typename T>
class QrGPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool compute_q;
    bool reduced_mode;
    auto& dev_ctx =
        context.template device_context<platform::CUDADeviceContext>();
    const Tensor& x = *context.Input<Tensor>("X");
    Tensor& q = *context.Output<Tensor>("Q");
    Tensor& r = *context.Output<Tensor>("R");
    const std::string mode = context.Attr<std::string>("mode");
    std::tie(compute_q, reduced_mode) = _parse_qr_mode(mode);

    auto numel = x.numel();
    PADDLE_ENFORCE_GT(numel, 0, platform::errors::PreconditionNotMet(
                                    "The input of QR is empty."));
    auto x_dims = x.dims();
    int x_rank = x_dims.size();
    int m = x_dims[x_rank - 2];
    int n = x_dims[x_rank - 1];
    int min_mn = std::min(m, n);
    int k = reduced_mode ? min_mn : m;
    int batch_size = numel / (m * n);
    int qr_stride = m * n;
    int tau_stride = min_mn;

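    // Per matrix in the batch: Q has shape m x k and R has shape k x n, where
    // k = min(m, n) in reduced mode and k = m in complete mode.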
    if (compute_q) {
      q.mutable_data<math::Real<T>>(
          context.GetPlace(),
          size_t(batch_size * m * k * sizeof(math::Real<T>)));
    }
    r.mutable_data<math::Real<T>>(
        context.GetPlace(), size_t(batch_size * k * n * sizeof(math::Real<T>)));

    auto dito =
        math::DeviceIndependenceTensorOperations<platform::CUDADeviceContext,
                                                 T>(context);

    // Note: allocate temporary tensors because in-place operations are not
    // supported.
    // Prepare qr
    Tensor qr;
    qr.mutable_data<math::Real<T>>(
        context.GetPlace(), size_t(batch_size * m * n * sizeof(math::Real<T>)));
    // BatchedGeqrf performs the computation in-place, so 'qr' must be a copy
    // of the input
    TensorCopy(x, context.GetPlace(), &qr);

    // Prepare tau
    auto tau_dims_vec = framework::vectorize<int>(x_dims);
    tau_dims_vec.pop_back();
    tau_dims_vec[tau_dims_vec.size() - 1] = min_mn;
    Tensor tau = dito.Fill(tau_dims_vec, 0);
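    // geqrf writes one Householder scalar factor per reflector into 'tau',
    // i.e. a min(m, n)-vector for each matrix in the batch.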

    // Transpose 'qr' to conform to the column-major order required by cuSOLVER
    auto tmp_qr = dito.Transpose(qr);
    framework::TensorCopy(tmp_qr, qr.place(), &qr);
    auto qr_data = qr.mutable_data<T>(context.GetPlace());
    auto tau_data = tau.mutable_data<T>(context.GetPlace());

    BatchedGeqrf<platform::CUDADeviceContext, T>(
        dev_ctx, batch_size, m, n, qr_data, m, tau_data, qr_stride, tau_stride);

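    // After geqrf, the upper triangle of each factor in 'qr' holds R and the
    // part below the diagonal holds the Householder reflectors; the TrilTriu
    // helper extracts the upper-triangular R.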
    if (reduced_mode) {
      auto trans_qr = dito.Transpose(qr);
      auto sliced_qr = dito.Slice(trans_qr, {-2}, {0}, {min_mn});
      auto tmp_r = dito.TrilTriu(sliced_qr, 0, false);
      // Transpose 'tmp_r' to restore the original row-major order
      framework::TensorCopy(tmp_r, r.place(), &r);
    } else {
      auto trans_qr = dito.Transpose(qr);
      auto tmp_r = dito.TrilTriu(trans_qr, 0, false);
      // Transpose 'tmp_r' to restore the original row-major order
      framework::TensorCopy(tmp_r, r.place(), &r);
    }

    if (compute_q) {
      // Perform ORGQR for Q using the result from GEQRF
      // Transpose 'q' to restore the original row-major order
      if (reduced_mode) {
        BatchedOrgqr<platform::CUDADeviceContext, T>(
            dev_ctx, batch_size, m, min_mn, min_mn, qr_data, m, tau_data,
            qr_stride, tau_stride);
        auto trans_q = dito.Transpose(qr);
        auto sliced_q = dito.Slice(trans_q, {-1}, {0}, {min_mn});
        framework::TensorCopy(sliced_q, q.place(), &q);
      } else {
        if (m > n) {
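          // In complete mode with m > n, orgqr must form an m x m Q, but 'qr'
          // only has m x n storage per matrix, so each factor is copied into
          // a zero-padded m x m buffer first.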
          auto new_qr_dims_vec = framework::vectorize<int>(x_dims);
          new_qr_dims_vec[new_qr_dims_vec.size() - 1] = m;
          Tensor new_qr = dito.Fill(new_qr_dims_vec, 0);
          auto new_qr_data = new_qr.mutable_data<T>(context.GetPlace());
          auto new_qr_stride = m * m;
          for (int i = 0; i < batch_size; ++i) {
            memory::Copy(dev_ctx.GetPlace(), (new_qr_data + i * new_qr_stride),
                         dev_ctx.GetPlace(), (qr_data + i * qr_stride),
                         qr_stride * sizeof(math::Real<T>), dev_ctx.stream());
          }
          BatchedOrgqr<platform::CUDADeviceContext, T>(
              dev_ctx, batch_size, m, m, min_mn, new_qr_data, m, tau_data,
              new_qr_stride, tau_stride);
          auto trans_q = dito.Transpose(new_qr);
          framework::TensorCopy(trans_q, q.place(), &q);
        } else {
          BatchedOrgqr<platform::CUDADeviceContext, T>(
              dev_ctx, batch_size, m, m, min_mn, qr_data, m, tau_data,
              qr_stride, tau_stride);
          auto trans_q = dito.Transpose(qr);
          auto sliced_q = dito.Slice(trans_q, {-1}, {0}, {m});
          framework::TensorCopy(sliced_q, q.place(), &q);
        }
      }
    }
  }
};

template <>
void BatchedGeqrf<platform::CUDADeviceContext, float>(
    const platform::CUDADeviceContext& dev_ctx, int batch_size, int m, int n,
    float* a, int lda, float* tau, int a_stride, int tau_stride) {
  int lwork = 0;

  auto handle = dev_ctx.cusolver_dn_handle();
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgeqrf_bufferSize(
      handle, m, n, a, lda, &lwork));
  auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float));
  float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
  auto info = memory::Alloc(dev_ctx, sizeof(int));
  int* info_d = reinterpret_cast<int*>(info->ptr());

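  // All matrices in the batch share the same shape, so one workspace query
  // and a single device workspace buffer are reused across the loop.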
  for (int i = 0; i < batch_size; ++i) {
    float* a_working_ptr = &a[i * a_stride];
    float* tau_working_ptr = &tau[i * tau_stride];
    // compute geqrf
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgeqrf(
        handle, m, n, a_working_ptr, lda, tau_working_ptr, workspace_ptr, lwork,
        info_d));
    // The device-to-host copy of the error code below is asynchronous on the
    // context's stream, so synchronize before reading it on the host.
    int info_h;
    memory::Copy(platform::CPUPlace(), &info_h, dev_ctx.GetPlace(), info_d,
                 sizeof(int), dev_ctx.stream());
    dev_ctx.Wait();
    PADDLE_ENFORCE_EQ(
        info_h, 0,
        platform::errors::PreconditionNotMet(
            "For batch [%d]: CUSolver geqrf failed with info = [%d]", i,
            info_h));
  }
}

template <>
void BatchedGeqrf<platform::CUDADeviceContext, double>(
    const platform::CUDADeviceContext& dev_ctx, int batch_size, int m, int n,
    double* a, int lda, double* tau, int a_stride, int tau_stride) {
  int lwork = 0;

  auto handle = dev_ctx.cusolver_dn_handle();
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgeqrf_bufferSize(
      handle, m, n, a, lda, &lwork));
  auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double));
  double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
  auto info = memory::Alloc(dev_ctx, sizeof(int));
  int* info_d = reinterpret_cast<int*>(info->ptr());

  for (int i = 0; i < batch_size; ++i) {
    double* a_working_ptr = &a[i * a_stride];
    double* tau_working_ptr = &tau[i * tau_stride];
    // compute geqrf
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgeqrf(
        handle, m, n, a_working_ptr, lda, tau_working_ptr, workspace_ptr, lwork,
        info_d));
    // The device-to-host copy of the error code below is asynchronous on the
    // context's stream, so synchronize before reading it on the host.
    int info_h;
    memory::Copy(platform::CPUPlace(), &info_h, dev_ctx.GetPlace(), info_d,
                 sizeof(int), dev_ctx.stream());
    dev_ctx.Wait();
    PADDLE_ENFORCE_EQ(
        info_h, 0,
        platform::errors::PreconditionNotMet(
            "For batch [%d]: CUSolver geqrf failed with info = [%d]", i,
            info_h));
  }
}

template <>
void BatchedOrgqr<platform::CUDADeviceContext, float>(
    const platform::CUDADeviceContext& dev_ctx, int batch_size, int m, int n,
    int k, float* a, int lda, float* tau, int a_stride, int tau_stride) {
  int lwork = 0;

  auto handle = dev_ctx.cusolver_dn_handle();
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSorgqr_bufferSize(
      handle, m, n, k, a, lda, tau, &lwork));
  auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float));
  float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
  auto info = memory::Alloc(dev_ctx, sizeof(int));
  int* info_d = reinterpret_cast<int*>(info->ptr());

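  // orgqr overwrites 'a' in place with the explicit Q assembled from the
  // first k Householder reflectors produced by geqrf.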
  for (int i = 0; i < batch_size; ++i) {
    float* a_working_ptr = &a[i * a_stride];
    float* tau_working_ptr = &tau[i * tau_stride];
    // compute orgqr
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSorgqr(
        handle, m, n, k, a_working_ptr, lda, tau_working_ptr, workspace_ptr,
        lwork, info_d));
    // The device-to-host copy of the error code below is asynchronous on the
    // context's stream, so synchronize before reading it on the host.
    int info_h;
    memory::Copy(platform::CPUPlace(), &info_h, dev_ctx.GetPlace(), info_d,
                 sizeof(int), dev_ctx.stream());
    dev_ctx.Wait();
    PADDLE_ENFORCE_EQ(
        info_h, 0,
        platform::errors::PreconditionNotMet(
            "For batch [%d]: CUSolver orgqr failed with info = [%d]", i,
            info_h));
  }
}

template <>
void BatchedOrgqr<platform::CUDADeviceContext, double>(
    const platform::CUDADeviceContext& dev_ctx, int batch_size, int m, int n,
    int k, double* a, int lda, double* tau, int a_stride, int tau_stride) {
  int lwork = 0;

  auto handle = dev_ctx.cusolver_dn_handle();
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDorgqr_bufferSize(
      handle, m, n, k, a, lda, tau, &lwork));
  auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double));
  double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
  auto info = memory::Alloc(dev_ctx, sizeof(int));
  int* info_d = reinterpret_cast<int*>(info->ptr());

  for (int i = 0; i < batch_size; ++i) {
    double* a_working_ptr = &a[i * a_stride];
    double* tau_working_ptr = &tau[i * tau_stride];
    // compute orgqr
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDorgqr(
        handle, m, n, k, a_working_ptr, lda, tau_working_ptr, workspace_ptr,
        lwork, info_d));
    // The device-to-host copy of the error code below is asynchronous on the
    // context's stream, so synchronize before reading it on the host.
    int info_h;
    memory::Copy(platform::CPUPlace(), &info_h, dev_ctx.GetPlace(), info_d,
                 sizeof(int), dev_ctx.stream());
    dev_ctx.Wait();
    PADDLE_ENFORCE_EQ(
        info_h, 0,
        platform::errors::PreconditionNotMet(
            "For batch [%d]: CUSolver orgqr failed with info = [%d]", i,
            info_h));
  }
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(qr, ops::QrGPUKernel<float>, ops::QrGPUKernel<double>);
REGISTER_OP_CUDA_KERNEL(
    qr_grad, ops::QrGradKernel<paddle::platform::CUDADeviceContext, float>,
    ops::QrGradKernel<paddle::platform::CUDADeviceContext, double>);
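
// Note: only float and double kernels are registered here (no complex
// dtypes); as of this writing the qr op backs the Python-level API, e.g.
// paddle.linalg.qr.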

#endif  // not PADDLE_WITH_HIP