// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/svd_helper.h"
#include "paddle/phi/kernels/funcs/lapack/lapack_function.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/dynload/cusolver.h"
#endif  // PADDLE_WITH_CUDA

namespace paddle {
namespace operators {
namespace math {

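// Returns the number of matrices in the batch: the product of all but the
// last two dimensions (e.g. dims [6, 3, 4, 4] give a batch size of 18).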
inline int64_t GetBatchSize(framework::DDim dims) {
  int64_t batch_size = 1;
  auto dim_size = dims.size();
  for (int i = 0; i < dim_size - 2; i++) {
    batch_size *= dims[i];
  }
  return batch_size;
}

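// Checks the `info` result returned by LAPACK/cuSOLVER: by convention,
// info < 0 means the (-info)-th argument had an illegal value, info > 0
// means the algorithm failed to converge, and info == 0 means success.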
static void CheckEighResult(const int batch, const int info) {
  PADDLE_ENFORCE_LE(
      info, 0,
      platform::errors::PreconditionNotMet(
          "For batch [%d]: the [%d] off-diagonal elements of an intermediate"
          "tridiagonal form did not converge to zero",
          batch, info));
  PADDLE_ENFORCE_GE(
      info, 0,
      platform::errors::PreconditionNotMet(
          "For batch [%d]: the [%d] argument had an illegal value", batch,
          info));
}

template <typename DeviceContext, typename T>
struct MatrixEighFunctor {
  void operator()(const framework::ExecutionContext &ctx, const Tensor &input,
                  Tensor *eigen_values, Tensor *eigen_vectors, bool is_lower,
                  bool has_vectors);
};
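
// A minimal usage sketch (illustrative only; `ctx`, `input`, `values`, and
// `vectors` are assumed to be prepared by the calling kernel):
//
//   math::MatrixEighFunctor<platform::CPUDeviceContext, float> functor;
//   functor(ctx, input, &values, &vectors, /*is_lower=*/true,
//           /*has_vectors=*/true);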

// Calculates the eigenvalues and eigenvectors of Hermitian or real
// symmetric matrices, and uses the variable has_vectors to
// control whether to return the eigenvectors.
template <typename T>
struct MatrixEighFunctor<platform::CPUDeviceContext, T> {
 public:
  void operator()(const framework::ExecutionContext &ctx, const Tensor &input,
                  Tensor *eigen_values, Tensor *eigen_vectors, bool is_lower,
                  bool has_vectors) {
    using ValueType = phi::dtype::Real<T>;
    auto *out_value = eigen_values->mutable_data<ValueType>(ctx.GetPlace());

    auto dito =
        math::DeviceIndependenceTensorOperations<platform::CPUDeviceContext, T>(
            ctx);

    Tensor input_trans;
    // LAPACK expects column-major storage; the transpose gives the input
    // a contiguous memory layout
    input_trans = dito.Transpose(input);
    auto *input_vector = input_trans.data<T>();

    auto dims = input.dims();
    int dim_size = dims.size();
    int64_t batch_size = GetBatchSize(dims);

    int vector_stride = dims[dim_size - 1] * dims[dim_size - 2];
    int values_stride = dims[dim_size - 1];
    char uplo = is_lower ? 'L' : 'U';
    char jobz = has_vectors ? 'V' : 'N';
    auto n = dims[dim_size - 1];
    auto lda = std::max<int64_t>(1, n);
    // When lwork = -1, the LAPACK routine performs a workspace query and
    // returns only the optimal workspace sizes
    int lwork = -1;      // The length of the array work
    int lrwork = -1;     // The dimension of the array rwork (a real array)
    int liwork = -1;     // The dimension of the array iwork
    int iwork_opt = -1;  // The optimal length of the array iwork
    T lwork_opt = static_cast<T>(-1);  // The optimal length of the array work
    ValueType rwork_opt =
        static_cast<ValueType>(-1);  // The optimal length of the array rwork

    int info = 0;
    // Call lapackEigh to get the optimal size of work data
    phi::funcs::lapackEigh<T, ValueType>(
        jobz, uplo, n, input_vector, lda, out_value, &lwork_opt, lwork,
        &rwork_opt, lrwork, &iwork_opt, liwork, &info);
    lwork = std::max<int>(1, static_cast<int>(lwork_opt));
    liwork = std::max<int>(1, iwork_opt);

    Tensor rwork_tensor;
    ValueType *rwork_data = nullptr;

    // complex type
    if (framework::IsComplexType(
            framework::TransToProtoVarType(input.dtype()))) {
      lrwork = std::max<int>(1, static_cast<int>(rwork_opt));
      rwork_data = rwork_tensor.mutable_data<ValueType>(
          phi::make_ddim({lrwork}), ctx.GetPlace());
    }
    Tensor iwork_tensor, work_tensor;
    auto *iwork_data = iwork_tensor.mutable_data<int>(phi::make_ddim({liwork}),
                                                      ctx.GetPlace());
    auto *work_data =
        work_tensor.mutable_data<T>(phi::make_ddim({lwork}), ctx.GetPlace());

    for (auto i = 0; i < batch_size; i++) {
      auto *value_data = out_value + i * values_stride;
      auto *input_data = input_vector + i * vector_stride;
      phi::funcs::lapackEigh<T, phi::dtype::Real<T>>(
          jobz, uplo, n, input_data, lda, value_data, work_data, lwork,
          rwork_data, lrwork, iwork_data, liwork, &info);
      CheckEighResult(i, info);
    }
    if (has_vectors) {
      PADDLE_ENFORCE_NOT_NULL(eigen_vectors,
                              platform::errors::InvalidArgument(
                                  "When has_vectors is true,"
                                  "the eigenvectors needs to be calculated, "
                                  "so the eigenvectors must be provided."));
      input_trans = dito.Transpose(input_trans);
      eigen_vectors->ShareDataWith(input_trans);
    }
  }
};

#ifdef PADDLE_WITH_CUDA

// Calculates the eigenvalues and eigenvectors of Hermitian or real
// symmetric matrices on GPU, and uses the variable has_vectors
// to control whether to return the eigenvectors.
template <typename T>
struct MatrixEighFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const framework::ExecutionContext &ctx, const Tensor &input,
                  Tensor *eigen_values, Tensor *eigen_vectors, bool is_lower,
                  bool has_vectors) {
    using ValueType = phi::dtype::Real<T>;
    auto *out_value = eigen_values->mutable_data<ValueType>(ctx.GetPlace());

    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto dito =
        math::DeviceIndependenceTensorOperations<platform::CUDADeviceContext,
                                                 T>(ctx);
    Tensor input_trans;
    input_trans = dito.Transpose(input);
    auto *input_vector = input_trans.data<T>();
    auto &dims = input.dims();
    int dim_size = dims.size();
    int64_t batch_size = GetBatchSize(dims);

    cublasFillMode_t uplo =
        is_lower ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER;
    cusolverEigMode_t jobz =
        has_vectors ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;

    int n = dims[dim_size - 1];
    int lda = std::max<int>(1, n);
    auto vector_stride = dims[dim_size - 1] * dims[dim_size - 2];
    auto values_stride = dims[dim_size - 1];
    int lwork = 0;
    auto info = memory::Alloc(dev_ctx, sizeof(int) * batch_size);
    auto *info_ptr = reinterpret_cast<int *>(info->ptr());

    // When the input type is float32 and the matrix dimension is between
    // [*, 32, 32] and [*, 512, 512] (inclusive), syevj has better
    // performance.
    bool use_syevj = (framework::TransToProtoVarType(input.dtype()) ==
                          framework::proto::VarType::FP32 &&
                      values_stride >= 32 && values_stride <= 512);
    syevjInfo_t syevj_params;
    if (use_syevj) {
      PADDLE_ENFORCE_GPU_SUCCESS(
          platform::dynload::cusolverDnCreateSyevjInfo(&syevj_params));
      PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSsyevj_bufferSize(
          dev_ctx.cusolver_dn_handle(), jobz, uplo, n,
          reinterpret_cast<const float *>(input_vector), lda,
          reinterpret_cast<const float *>(out_value), &lwork, syevj_params));
    } else {
      EvdBuffer(dev_ctx.cusolver_dn_handle(), jobz, uplo, n, input_vector, lda,
                out_value, &lwork);
    }
    auto work = memory::Alloc(dev_ctx, sizeof(T) * lwork);
    auto *work_ptr = reinterpret_cast<T *>(work->ptr());
    for (auto i = 0; i < batch_size; i++) {
      auto *input_data = input_vector + i * vector_stride;
      auto *value_data = out_value + i * values_stride;
      auto handle = dev_ctx.cusolver_dn_handle();
      if (use_syevj) {
        PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDnSsyevj(
            handle, jobz, uplo, n, reinterpret_cast<float *>(input_data), lda,
            reinterpret_cast<float *>(value_data),
            reinterpret_cast<float *>(work_ptr), lwork, info_ptr,
            syevj_params));
      } else {
        Evd(handle, jobz, uplo, n, input_data, lda, value_data, work_ptr, lwork,
            info_ptr);
      }
      int error_info = 0;
      memory::Copy(platform::CPUPlace(), &error_info, dev_ctx.GetPlace(),
                   info_ptr, sizeof(int), dev_ctx.stream());
      CheckEighResult(i, error_info);
    }

    if (use_syevj) {
      PADDLE_ENFORCE_GPU_SUCCESS(
          platform::dynload::cusolverDnDestroySyevjInfo(syevj_params));
    }
    if (has_vectors) {
      PADDLE_ENFORCE_NOT_NULL(eigen_vectors,
                              platform::errors::InvalidArgument(
                                  "When has_vectors is true,"
                                  "the eigenvectors needs to be calculated,"
                                  "so the eigenvectors must be provided."));
      input_trans = dito.Transpose(input_trans);
      eigen_vectors->ShareDataWith(input_trans);
    }
  }

  using ValueType = phi::dtype::Real<T>;
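
  // Thin wrappers over the cusolverDn<t>evd_bufferSize / cusolverDn<t>evd
  // routines; explicit specializations for each element type are generated
  // by the FUNC_WITH_TYPES macros below.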
  inline void EvdBuffer(cusolverDnHandle_t handle, cusolverEigMode_t jobz,
                        cublasFillMode_t uplo, int n, const T *A, int lda,
                        const ValueType *W, int *lwork) const;

  inline void Evd(cusolverDnHandle_t handle, cusolverEigMode_t jobz,
                  cublasFillMode_t uplo, int n, T *A, int lda, ValueType *W,
                  T *work, int lwork, int *devInfo) const;
};

#define FUNC_WITH_TYPES(m)                                \
  m(float, Ssy, float) m(double, Dsy, double)             \
      m(paddle::platform::complex<float>, Che, cuComplex) \
          m(paddle::platform::complex<double>, Zhe, cuDoubleComplex)
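
// For example, EVDBUFFER_INSTANCE and EVD_INSTANCE instantiated with
// (float, Ssy, float) call cusolverDnSsyevd_bufferSize / cusolverDnSsyevd,
// and with (paddle::platform::complex<float>, Che, cuComplex) they call
// cusolverDnCheevd_bufferSize / cusolverDnCheevd.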

#define EVDBUFFER_INSTANCE(T, C, CastType)                                     \
  template <>                                                                  \
  inline void MatrixEighFunctor<platform::CUDADeviceContext, T>::EvdBuffer(    \
      cusolverDnHandle_t handle, cusolverEigMode_t jobz,                       \
      cublasFillMode_t uplo, int n, const T *A, int lda, const ValueType *W,   \
      int *lwork) const {                                                      \
    PADDLE_ENFORCE_GPU_SUCCESS(                                                \
        platform::dynload::cusolverDn##C##evd_bufferSize(                      \
            handle, jobz, uplo, n, reinterpret_cast<const CastType *>(A), lda, \
            W, lwork));                                                        \
  }

FUNC_WITH_TYPES(EVDBUFFER_INSTANCE);

#define EVD_INSTANCE(T, C, CastType)                                      \
  template <>                                                             \
  inline void MatrixEighFunctor<platform::CUDADeviceContext, T>::Evd(     \
      cusolverDnHandle_t handle, cusolverEigMode_t jobz,                  \
      cublasFillMode_t uplo, int n, T *A, int lda, ValueType *W, T *work, \
      int lwork, int *devInfo) const {                                    \
    PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cusolverDn##C##evd(     \
        handle, jobz, uplo, n, reinterpret_cast<CastType *>(A), lda, W,   \
        reinterpret_cast<CastType *>(work), lwork, devInfo));             \
  }

FUNC_WITH_TYPES(EVD_INSTANCE);

#undef FUNC_WITH_TYPES
#undef EVDBUFFER_INSTANCE
#undef EVD_INSTANCE

#endif  // PADDLE_WITH_CUDA

}  // namespace math
}  // namespace operators
}  // namespace paddle