/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef PADDLE_WITH_HIP
// HIP does not support cuSOLVER
#include <algorithm>
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/matrix_rank_op.h"
#include "paddle/fluid/operators/svd_helper.h"
#include "paddle/fluid/platform/dynload/cusolver.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/kernels/funcs/compare_functors.h"
#include "paddle/phi/kernels/funcs/complex_functors.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
namespace operators {
namespace detail {
DDim GetUDDim(const DDim& x_dim, int k) {
  auto x_vec = phi::vectorize(x_dim);
  x_vec[x_vec.size() - 1] = k;
  return phi::make_ddim(x_vec);
}

DDim GetVHDDim(const DDim& x_dim, int k) {
  auto x_vec = phi::vectorize(x_dim);
  x_vec[x_vec.size() - 2] = k;
  return phi::make_ddim(x_vec);
}
}  // namespace detail

template <typename T>
class MatrixRankGPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto& dev_ctx =
        context.template device_context<platform::CUDADeviceContext>();

    const Tensor* x = context.Input<Tensor>("X");
    auto* x_data = x->data<T>();
    auto* out = context.Output<Tensor>("Out");
    out->mutable_data<int64_t>(context.GetPlace());
    bool hermitian = context.Attr<bool>("hermitian");

    auto dim_x = x->dims();
    auto dim_out = out->dims();
    int rows = dim_x[dim_x.size() - 2];
    int cols = dim_x[dim_x.size() - 1];
    int k = std::min(rows, cols);
    auto numel = x->numel();
    int batches = numel / (rows * cols);

    bool use_default_tol = context.Attr<bool>("use_default_tol");
    const Tensor* atol_tensor = nullptr;
    Tensor temp_tensor;
    T rtol_T = 0;
    if (use_default_tol) {
      framework::TensorFromVector<T>(std::vector<T>{0},
                                     context.device_context(), &temp_tensor);
      atol_tensor = &temp_tensor;
      rtol_T = std::numeric_limits<T>::epsilon() * std::max(rows, cols);
    } else if (context.HasInput("TolTensor")) {
      atol_tensor = context.Input<Tensor>("TolTensor");
    } else {
      framework::TensorFromVector<T>(std::vector<T>{context.Attr<float>("tol")},
                                     context.device_context(), &temp_tensor);
      atol_tensor = &temp_tensor;
    }
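    // The effective tolerance applied below is
    // max(atol, rtol * largest singular value): with use_default_tol, atol is
    // 0 and rtol is eps * max(rows, cols), matching the default of
    // numpy.linalg.matrix_rank.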

    // X must be copied once here, because gesvdj destroys the contents of
    // its input matrix on exit.
    Tensor x_tmp;
    paddle::framework::TensorCopy(*x, context.GetPlace(), &x_tmp);
    auto info = memory::Alloc(dev_ctx, sizeof(int) * batches);
    int* info_ptr = reinterpret_cast<int*>(info->ptr());
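    // Buffer for the cuSOLVER status code; each per-matrix solver call below
    // writes its info value here, and it is copied back to the host and
    // checked immediately after the call.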

    Tensor eigenvalue_tensor;
    auto* eigenvalue_data = eigenvalue_tensor.mutable_data<T>(
        detail::GetEigenvalueDim(dim_x, k), context.GetPlace());
    if (hermitian) {
      SyevjBatched(dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data,
                   info_ptr);
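      // For a Hermitian matrix the singular values are the absolute values
      // of its eigenvalues, so take the element-wise abs in place before
      // comparing against the tolerance.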
      platform::ForRange<platform::CUDADeviceContext> for_range(
          dev_ctx, eigenvalue_tensor.numel());
      phi::funcs::AbsFunctor<T> functor(eigenvalue_data, eigenvalue_data,
                                        eigenvalue_tensor.numel());
      for_range(functor);
    } else {
      Tensor U, VH;
      auto* u_data =
          U.mutable_data<T>(detail::GetUDDim(dim_x, k), context.GetPlace());
      auto* vh_data =
          VH.mutable_data<T>(detail::GetVHDDim(dim_x, k), context.GetPlace());
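      // cuSOLVER assumes column-major storage, so the row-major
      // (rows x cols) input is handed over as a column-major (cols x rows)
      // matrix: the singular values are unchanged, while U and V^H swap
      // roles, which is why cols/rows and vh_data/u_data are passed in
      // swapped order here.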
      GesvdjBatched(dev_ctx, batches, cols, rows, k, x_tmp.data<T>(), vh_data,
                    u_data, eigenvalue_data, info_ptr, 1);
    }

    auto dito_T =
        math::DeviceIndependenceTensorOperations<platform::CUDADeviceContext,
                                                 T>(context);
    std::vector<int> max_eigenvalue_shape =
        phi::vectorize<int>(detail::RemoveLastDim(eigenvalue_tensor.dims()));
    Tensor max_eigenvalue_tensor =
        dito_T.ReduceMax(eigenvalue_tensor, max_eigenvalue_shape);
    Tensor temp_rtol_tensor;
    framework::TensorFromVector<T>(std::vector<T>{rtol_T},
                                   context.device_context(), &temp_rtol_tensor);
    Tensor rtol_tensor = dito_T.Mul(temp_rtol_tensor, max_eigenvalue_tensor);
    Tensor tol_tensor;
    tol_tensor.mutable_data<T>(dim_out, context.GetPlace());
    ElementwiseComputeEx<GreaterElementFunctor<T>, platform::CUDADeviceContext,
                         T, T>(context, atol_tensor, &rtol_tensor, -1,
                               GreaterElementFunctor<T>(), &tol_tensor);

    tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1));

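    // The rank equals the number of singular values strictly greater than
    // tol: broadcast-compare the values against tol along the last axis,
    // then reduce-sum the resulting 0/1 entries below.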
    Tensor compare_result;
    compare_result.mutable_data<int64_t>(detail::NewAxisDim(dim_out, k),
                                         context.GetPlace());
    int axis = -1;
    ElementwiseComputeEx<phi::funcs::GreaterThanFunctor<T, int64_t>,
                         platform::CUDADeviceContext, T, int64_t>(
        context, &eigenvalue_tensor, &tol_tensor, axis,
        phi::funcs::GreaterThanFunctor<T, int64_t>(), &compare_result);
    auto dito_int =
        math::DeviceIndependenceTensorOperations<platform::CUDADeviceContext,
                                                 int64_t>(context);
    std::vector<int> result_shape = phi::vectorize<int>(dim_out);
    Tensor result = dito_int.ReduceSum(compare_result, result_shape);
    out->ShareDataWith(result);
  }

  void GesvdjBatched(const platform::CUDADeviceContext& dev_ctx, int batchSize,
                     int m, int n, int k, T* A, T* U, T* V, T* S, int* info,
                     int thin_UV = 1) const;

  void SyevjBatched(const platform::CUDADeviceContext& dev_ctx, int batchSize,
                    int n, T* A, T* W, int* info) const;
};

template <>
void MatrixRankGPUKernel<float>::GesvdjBatched(
    const platform::CUDADeviceContext& dev_ctx, int batchSize, int m, int n,
    int k, float* A, float* U, float* V, float* S, int* info,
    int thin_UV) const {
  // do not compute singular vectors
  const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR;
  gesvdjInfo_t gesvdj_params = NULL;
  int lda = m;
  int ldu = m;
  int ldt = n;
  int lwork = 0;
  auto handle = dev_ctx.cusolver_dn_handle();
  PADDLE_ENFORCE_GPU_SUCCESS(
      platform::dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params));
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgesvdj_bufferSize(
      handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork,
      gesvdj_params));
  auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float));
  float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
  int stride_A = lda * n;
  int stride_U = ldu * (thin_UV ? k : m);
  int stride_V = ldt * (thin_UV ? k : n);
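  // gesvdj is called once per matrix in the batch; the handle, params, and
  // workspace are shared across iterations, and the data pointers advance by
  // one full matrix (or one set of k singular values) per step.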
  for (int i = 0; i < batchSize; i++) {
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSgesvdj(
        handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i,
        U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork,
        info, gesvdj_params));
    int error_info;
    memory::Copy(platform::CPUPlace(), &error_info, dev_ctx.GetPlace(), info,
                 sizeof(int), dev_ctx.stream());
    PADDLE_ENFORCE_EQ(
        error_info, 0,
        platform::errors::PreconditionNotMet(
            "For batch [%d]: cuSOLVER SVD failed with non-zero info [%d].", i,
            error_info));
  }
  PADDLE_ENFORCE_GPU_SUCCESS(
      platform::dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params));
}

template <>
void MatrixRankGPUKernel<double>::GesvdjBatched(
    const platform::CUDADeviceContext& dev_ctx, int batchSize, int m, int n,
    int k, double* A, double* U, double* V, double* S, int* info,
    int thin_UV) const {
  // do not compute singular vectors
  const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR;
  gesvdjInfo_t gesvdj_params = NULL;
  int lda = m;
  int ldu = m;
  int ldt = n;
  int lwork = 0;
  auto handle = dev_ctx.cusolver_dn_handle();
  PADDLE_ENFORCE_GPU_SUCCESS(
      platform::dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params));
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgesvdj_bufferSize(
      handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork,
      gesvdj_params));
  auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double));
  double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
  int stride_A = lda * n;
  int stride_U = ldu * (thin_UV ? k : m);
  int stride_V = ldt * (thin_UV ? k : n);
  for (int i = 0; i < batchSize; ++i) {
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDgesvdj(
        handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i,
        U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork,
        info, gesvdj_params));
    // check the error info
    int error_info;
    memory::Copy(platform::CPUPlace(), &error_info, dev_ctx.GetPlace(), info,
                 sizeof(int), dev_ctx.stream());
    PADDLE_ENFORCE_EQ(
        error_info, 0,
        platform::errors::PreconditionNotMet(
            "For batch [%d]: cuSOLVER SVD failed with non-zero info [%d].", i,
            error_info));
  }
  PADDLE_ENFORCE_GPU_SUCCESS(
      platform::dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params));
}

template <>
void MatrixRankGPUKernel<float>::SyevjBatched(
    const platform::CUDADeviceContext& dev_ctx, int batchSize, int n, float* A,
    float* W, int* info) const {
  auto handle = dev_ctx.cusolver_dn_handle();
  // Compute eigenvalues only
  const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR;
  // cuSOLVER stores matrices in column-major order. NumPy and PyTorch use
  // the lower triangle (in row-major layout) to compute eigenvalues; in
  // column-major layout that corresponds to the upper triangle, so use
  // CUBLAS_FILL_MODE_UPPER here.
  cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
  int lda = n;
  int stride_A = lda * n;
  int lwork = 0;
  syevjInfo_t params = NULL;
  PADDLE_ENFORCE_GPU_SUCCESS(
      platform::dynload::cusolverDnCreateSyevjInfo(&params));
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSsyevj_bufferSize(
      handle, jobz, uplo, n, A, lda, W, &lwork, params));
  auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float));
  float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
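  // As with gesvdj above, syevj is called once per batch element, reusing
  // the workspace and advancing A and W by one matrix / one eigenvalue
  // vector per step.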
  for (int i = 0; i < batchSize; i++) {
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSsyevj(
        handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr,
        lwork, info, params));

    int error_info;
    memory::Copy(platform::CPUPlace(), &error_info, dev_ctx.GetPlace(), info,
                 sizeof(int), dev_ctx.stream());
    PADDLE_ENFORCE_EQ(
        error_info, 0,
        platform::errors::PreconditionNotMet(
            "For batch [%d]: cuSOLVER eigenvalue decomposition failed with "
            "non-zero info [%d].",
            i, error_info));
  }
  PADDLE_ENFORCE_GPU_SUCCESS(
      platform::dynload::cusolverDnDestroySyevjInfo(params));
}

template <>
void MatrixRankGPUKernel<double>::SyevjBatched(
    const platform::CUDADeviceContext& dev_ctx, int batchSize, int n, double* A,
    double* W, int* info) const {
  auto handle = dev_ctx.cusolver_dn_handle();
  // Compute eigenvalues only
  const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR;
  // use the upper triangle of A, for the same layout reason as in the float
  // specialization above
  cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
  int lda = n;
  int stride_A = lda * n;
  int lwork = 0;
  syevjInfo_t params = NULL;
  PADDLE_ENFORCE_GPU_SUCCESS(
      platform::dynload::cusolverDnCreateSyevjInfo(&params));
  PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDsyevj_bufferSize(
      handle, jobz, uplo, n, A, lda, W, &lwork, params));
  auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double));
  double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());

  for (int i = 0; i < batchSize; i++) {
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnDsyevj(
        handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr,
        lwork, info, params));
    int error_info;
    memory::Copy(platform::CPUPlace(), &error_info, dev_ctx.GetPlace(), info,
                 sizeof(int), dev_ctx.stream());
    PADDLE_ENFORCE_EQ(
        error_info, 0,
        platform::errors::PreconditionNotMet(
            "For batch [%d]: cuSOLVER eigenvalue decomposition failed with "
            "non-zero info [%d].",
            i, error_info));
  }
  PADDLE_ENFORCE_GPU_SUCCESS(
      platform::dynload::cusolverDnDestroySyevjInfo(params));
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(matrix_rank, ops::MatrixRankGPUKernel<float>,
                        ops::MatrixRankGPUKernel<double>);
#endif  // not PADDLE_WITH_HIP