/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#ifdef __GNUC__
#include <cxxabi.h>  // for __cxa_demangle
#endif               // __GNUC__

#if !defined(_WIN32)
#include <dlfcn.h>  // dladdr
#else               // _WIN32
#ifndef NOMINMAX
#define NOMINMAX  // msvc max/min macro conflict with std::min/max
#endif
#include <windows.h>  // GetModuleFileName
#endif

#ifdef PADDLE_WITH_CUDA
#include <cublas_v2.h>
#include <cudnn.h>
#include <curand.h>
#include <thrust/system/cuda/error.h>
#include <thrust/system_error.h>

#include "paddle/fluid/platform/cuda_error.pb.h"
#endif  // PADDLE_WITH_CUDA

#include <fstream>
#include <iomanip>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>

#if !defined(_WIN32) && !defined(PADDLE_WITH_MUSL)
#include <execinfo.h>
#endif

#define GLOG_NO_ABBREVIATED_SEVERITIES  // msvc conflict logging with windows.h
#include "glog/logging.h"
#include "paddle/fluid/platform/errors.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/port.h"
#include "paddle/fluid/platform/variant.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/fluid/string/to_string.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/dynload/cublas.h"
#include "paddle/fluid/platform/dynload/cudnn.h"
#include "paddle/fluid/platform/dynload/curand.h"
#include "paddle/fluid/platform/dynload/cusolver.h"
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
#include <error.h>
#include "paddle/fluid/platform/dynload/nccl.h"
#endif  // not(__APPLE__) and PADDLE_WITH_NCCL
#endif  // PADDLE_WITH_CUDA

// Note: these headers are included to simplify the demangled type strings
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/imperative/type_defs.h"

namespace paddle {
namespace platform {
class ErrorSummary;
}  // namespace platform
}  // namespace paddle

DECLARE_int32(call_stack_level);
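// Note: in this file, FLAGS_call_stack_level > 1 makes the thrown exception
// additionally carry the C++ call stack (see GetTraceBackString below);
// otherwise only the error summary is kept.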

namespace paddle {
namespace platform {

/** HELPER MACROS AND FUNCTIONS **/

#ifndef PADDLE_MAY_THROW
#define PADDLE_MAY_THROW noexcept(false)
#endif

// Because most enforce conditions evaluate to true, we can use
// __builtin_expect to instruct the compiler to generate code that
// always predicts the condition as true.
// This produces a faster binary. __builtin_expect is a GCC/Clang builtin.
// For more details, please check https://stackoverflow.com/a/43870188/724872.
#if !defined(_WIN32)
#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)
#else
// there is no equivalent intrinsic in msvc.
#define UNLIKELY(condition) (condition)
#endif

#if !defined(_WIN32)
#define LIKELY(condition) __builtin_expect(static_cast<bool>(condition), 1)
#else
// there is no equivalent intrinsic in msvc.
#define LIKELY(condition) (condition)
#endif
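
// Illustrative usage (a minimal sketch, not part of the original header):
//   if (UNLIKELY(ptr == nullptr)) {
//     // rarely taken error path
//   }
//   if (LIKELY(status == 0)) {
//     // common fast path
//   }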

#if defined _WIN32 && defined PADDLE_ON_INFERENCE && defined PADDLE_NO_PYTHON
#define HANDLE_THE_ERROR try {
#define END_HANDLE_THE_ERROR            \
  }                                     \
  catch (const std::exception& e) {     \
    std::cout << e.what() << std::endl; \
    throw;                              \
  }
#else
#define HANDLE_THE_ERROR
#define END_HANDLE_THE_ERROR
#endif

#ifdef __GNUC__
inline std::string demangle(std::string name) {
  int status = -4;  // some arbitrary value to eliminate the compiler warning
  std::unique_ptr<char, void (*)(void*)> res{
      abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free};
  return (status == 0) ? res.get() : name;
}
#else
inline std::string demangle(std::string name) { return name; }
#endif
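
// Illustrative example (a sketch): with GCC, demangle(typeid(int).name())
// turns the mangled name "i" into the readable "int"; with other compilers
// the name is returned unchanged.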

namespace details {
template <typename T>
inline constexpr bool IsArithmetic() {
  return std::is_arithmetic<T>::value;
}

template <typename T1, typename T2, bool kIsArithmetic /* = true */>
struct TypeConverterImpl {
  using Type1 = typename std::common_type<T1, T2>::type;
  using Type2 = Type1;
};

template <typename T1, typename T2>
struct TypeConverterImpl<T1, T2, false> {
  using Type1 = T1;
  using Type2 = T2;
};

template <typename T1, typename T2>
struct TypeConverter {
 private:
  static constexpr bool kIsArithmetic =
      IsArithmetic<T1>() && IsArithmetic<T2>();

 public:
  using Type1 = typename TypeConverterImpl<T1, T2, kIsArithmetic>::Type1;
  using Type2 = typename TypeConverterImpl<T1, T2, kIsArithmetic>::Type2;
};

template <typename T1, typename T2>
using CommonType1 = typename std::add_lvalue_reference<
    typename std::add_const<typename TypeConverter<T1, T2>::Type1>::type>::type;

template <typename T1, typename T2>
using CommonType2 = typename std::add_lvalue_reference<
    typename std::add_const<typename TypeConverter<T1, T2>::Type2>::type>::type;

// Here, we use SFINAE to check whether T can be converted to std::string
template <typename T>
struct CanToString {
 private:
  using YesType = uint8_t;
  using NoType = uint16_t;

  template <typename U>
  static YesType Check(decltype(std::cout << std::declval<U>())) {
    return 0;
  }

  template <typename U>
  static NoType Check(...) {
    return 0;
  }

 public:
  static constexpr bool kValue =
      std::is_same<YesType, decltype(Check<T>(std::cout))>::value;
};

template <bool kCanToString /* = true */>
struct BinaryCompareMessageConverter {
  template <typename T>
  static std::string Convert(const char* expression, const T& value) {
    return expression + std::string(":") + string::to_string(value);
  }
};

template <>
struct BinaryCompareMessageConverter<false> {
  template <typename T>
  static const char* Convert(const char* expression, const T& value) {
    return expression;
  }
};
}  // namespace details

template <typename T>
inline std::string ReplaceComplexTypeStr(std::string str,
                                         const std::string& type_name) {
  auto demangle_type_str = demangle(typeid(T).name());
  size_t start_pos = 0;
  while ((start_pos = str.find(demangle_type_str, start_pos)) !=
         std::string::npos) {
    str.replace(start_pos, demangle_type_str.length(), type_name);
    start_pos += type_name.length();
  }
  return str;
}

#define __REPLACE_COMPLEX_TYPE_STR__(__TYPENAME, __STR)                       \
  do {                                                                        \
    __STR = paddle::platform::ReplaceComplexTypeStr<__TYPENAME>(__STR,        \
                                                                #__TYPENAME); \
  } while (0)

inline std::string SimplifyDemangleStr(std::string str) {
  // the order is important: complex types must be replaced first
  __REPLACE_COMPLEX_TYPE_STR__(paddle::framework::AttributeMap, str);
  __REPLACE_COMPLEX_TYPE_STR__(paddle::framework::Attribute, str);
  __REPLACE_COMPLEX_TYPE_STR__(paddle::imperative::NameVariableWrapperMap, str);
  __REPLACE_COMPLEX_TYPE_STR__(paddle::imperative::NameVarBaseMap, str);
  __REPLACE_COMPLEX_TYPE_STR__(std::string, str);
  return str;
}

inline std::string GetCurrentTraceBackString() {
  std::ostringstream sout;

  sout << "\n\n--------------------------------------\n";
  sout << "C++ Traceback (most recent call last):";
  sout << "\n--------------------------------------\n";
#if !defined(_WIN32) && !defined(PADDLE_WITH_MUSL)
  static constexpr int TRACE_STACK_LIMIT = 100;

  void* call_stack[TRACE_STACK_LIMIT];
  auto size = backtrace(call_stack, TRACE_STACK_LIMIT);
  auto symbols = backtrace_symbols(call_stack, size);
  Dl_info info;
  int idx = 0;
  for (int i = size - 1; i >= 0; --i) {
    if (dladdr(call_stack[i], &info) && info.dli_sname) {
      auto demangled = demangle(info.dli_sname);
      std::string path(info.dli_fname);
      // C++ traceback info comes from core.so
      if (path.substr(path.length() - 3).compare(".so") == 0) {
        sout << string::Sprintf("%-3d %s\n", idx++,
                                SimplifyDemangleStr(demangled));
      }
    }
  }
  free(symbols);
#else
  sout << "Stack backtrace is not supported yet.\n";
#endif
  return sout.str();
}

template <typename StrType>
inline std::string GetErrorSumaryString(StrType&& what, const char* file,
                                        int line) {
  std::ostringstream sout;
  if (FLAGS_call_stack_level > 1) {
    sout << "\n----------------------\nError Message "
            "Summary:\n----------------------\n";
  }
  sout << string::Sprintf("%s (at %s:%d)", std::forward<StrType>(what), file,
                          line)
       << std::endl;
  return sout.str();
}

template <typename StrType>
inline std::string GetTraceBackString(StrType&& what, const char* file,
                                      int line) {
  if (FLAGS_call_stack_level > 1) {
    // FLAGS_call_stack_level>1 means showing c++ call stack
    return GetCurrentTraceBackString() + GetErrorSumaryString(what, file, line);
  } else {
    return GetErrorSumaryString(what, file, line);
  }
}

inline std::string SimplifyErrorTypeFormat(const std::string& str) {
  std::ostringstream sout;
  size_t type_end_pos = str.find(":", 0);
  if (type_end_pos == std::string::npos) {
    sout << str;
  } else {
    // Strip the trailing "Error" and the ":", and wrap the type name in "()"
    sout << "(" << str.substr(0, type_end_pos - 5) << ")"
         << str.substr(type_end_pos + 1);
  }
  return sout.str();
}

inline bool is_error(bool stat) { return !stat; }

// Note: This Macro can only be used within enforce.h
#define __THROW_ERROR_INTERNAL__(__ERROR_SUMMARY)                      \
  do {                                                                 \
    HANDLE_THE_ERROR                                                   \
    throw ::paddle::platform::EnforceNotMet(__ERROR_SUMMARY, __FILE__, \
                                            __LINE__);                 \
    END_HANDLE_THE_ERROR                                               \
  } while (0)

/** ENFORCE EXCEPTION AND MACROS **/

struct EnforceNotMet : public std::exception {
 public:
  EnforceNotMet(std::exception_ptr e, const char* file, int line) {
    try {
      std::rethrow_exception(e);
    } catch (platform::EnforceNotMet& e) {
      code_ = e.code();
      err_str_ = GetTraceBackString(e.what(), file, line);
      simple_err_str_ = SimplifyErrorTypeFormat(err_str_);
    } catch (std::exception& e) {
      err_str_ = GetTraceBackString(e.what(), file, line);
      simple_err_str_ = SimplifyErrorTypeFormat(err_str_);
    }
  }

  EnforceNotMet(const std::string& str, const char* file, int line)
      : err_str_(GetTraceBackString(str, file, line)) {
    simple_err_str_ = SimplifyErrorTypeFormat(err_str_);
  }

  EnforceNotMet(const ErrorSummary& error, const char* file, int line)
      : code_(error.code()),
        err_str_(GetTraceBackString(error.to_string(), file, line)) {
    simple_err_str_ = SimplifyErrorTypeFormat(err_str_);
  }

  const char* what() const noexcept override {
    if (FLAGS_call_stack_level > 1) {
      return err_str_.c_str();
    } else {
      return simple_err_str_.c_str();
    }
  }

  error::Code code() const { return code_; }

  const std::string& error_str() const { return err_str_; }

  const std::string& simple_error_str() const { return simple_err_str_; }

  void set_error_str(std::string str) {
    if (FLAGS_call_stack_level > 1) {
      err_str_ = str;
    } else {
      simple_err_str_ = str;
    }
  }

 private:
  // Used to determine the final type of exception thrown
  error::Code code_ = error::LEGACY;
  // Complete error message
  // e.g. InvalidArgumentError: ***
  std::string err_str_;
  // Simplified error message, used when neither the C++ call stack nor the
  // Python compile stack is shown
  // e.g. (InvalidArgument) ***
  std::string simple_err_str_;
};

#define PADDLE_THROW(...)                                                   \
  do {                                                                      \
    HANDLE_THE_ERROR                                                        \
    throw ::paddle::platform::EnforceNotMet(                                \
        ::paddle::platform::ErrorSummary(__VA_ARGS__), __FILE__, __LINE__); \
    END_HANDLE_THE_ERROR                                                    \
  } while (0)
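
// Illustrative usage (a sketch; the message and error type are examples only,
// not taken from this file):
//   PADDLE_THROW(platform::errors::InvalidArgument(
//       "Expected a non-negative axis, but received %d.", axis));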

#if defined(__CUDA_ARCH__)
// For cuda, the assertions can affect performance and it is therefore
// recommended to disable them in production code
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#assertion
#define PADDLE_ENFORCE(_IS_NOT_ERROR, __FORMAT, ...)                         \
  do {                                                                       \
    if (!(_IS_NOT_ERROR)) {                                                  \
      printf("Error: %s:%d Assertion `%s` failed. " __FORMAT "\n", __FILE__, \
             __LINE__, #_IS_NOT_ERROR, ##__VA_ARGS__);                       \
      asm("trap;");                                                          \
    }                                                                        \
  } while (0)
#else
#define PADDLE_ENFORCE(COND, ...)                                              \
  do {                                                                         \
    auto __cond__ = (COND);                                                    \
    if (UNLIKELY(::paddle::platform::is_error(__cond__))) {                    \
      __THROW_ERROR_INTERNAL__(::paddle::platform::ErrorSummary(__VA_ARGS__)); \
    }                                                                          \
  } while (0)
#endif

/*
 * Some enforce helpers here, usage:
 *    int a = 1;
 *    int b = 2;
 *    PADDLE_ENFORCE_EQ(a, b);
 *
 *    will raise an exception with a message described as follows:
 *    "Expected input a == b, but received a(1) != b(2)."
 *      with detailed stack information.
 *
 *    Extra messages are also supported, for example:
 *    PADDLE_ENFORCE_EQ(a, b, "some simple enforce failed between %d numbers", 2)
 */

#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...)                                   \
  do {                                                                        \
    if (UNLIKELY(nullptr == (__VAL))) {                                       \
      auto __summary__ = ::paddle::platform::ErrorSummary(__VA_ARGS__);       \
      auto __message__ = ::paddle::string::Sprintf(                           \
          "%s\n  [Hint: " #__VAL " should not be null.]",                     \
          __summary__.error_message());                                       \
      __THROW_ERROR_INTERNAL__(                                               \
          ::paddle::platform::ErrorSummary(__summary__.code(), __message__)); \
    }                                                                         \
  } while (0)
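
// Illustrative usage (a sketch; the variable and message are examples only):
//   PADDLE_ENFORCE_NOT_NULL(
//       var, platform::errors::NotFound("Variable %s is not found.", name));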

#define __PADDLE_BINARY_COMPARE(__VAL1, __VAL2, __CMP, __INV_CMP, ...)        \
  do {                                                                        \
    auto __val1 = (__VAL1);                                                   \
    auto __val2 = (__VAL2);                                                   \
    using __TYPE1__ = decltype(__val1);                                       \
    using __TYPE2__ = decltype(__val2);                                       \
    using __COMMON_TYPE1__ =                                                  \
        ::paddle::platform::details::CommonType1<__TYPE1__, __TYPE2__>;       \
    using __COMMON_TYPE2__ =                                                  \
        ::paddle::platform::details::CommonType2<__TYPE1__, __TYPE2__>;       \
    bool __is_not_error = (static_cast<__COMMON_TYPE1__>(__val1))__CMP(       \
        static_cast<__COMMON_TYPE2__>(__val2));                               \
    if (UNLIKELY(!__is_not_error)) {                                          \
      auto __summary__ = ::paddle::platform::ErrorSummary(__VA_ARGS__);       \
      constexpr bool __kCanToString__ =                                       \
          ::paddle::platform::details::CanToString<__TYPE1__>::kValue &&      \
          ::paddle::platform::details::CanToString<__TYPE2__>::kValue;        \
      auto __message__ = ::paddle::string::Sprintf(                           \
          "%s\n  [Hint: Expected %s " #__CMP                                  \
          " %s, but received %s " #__INV_CMP " %s.]",                         \
          __summary__.error_message(), #__VAL1, #__VAL2,                      \
          ::paddle::platform::details::BinaryCompareMessageConverter<         \
              __kCanToString__>::Convert(#__VAL1, __val1),                    \
          ::paddle::platform::details::BinaryCompareMessageConverter<         \
              __kCanToString__>::Convert(#__VAL2, __val2));                   \
      __THROW_ERROR_INTERNAL__(                                               \
          ::paddle::platform::ErrorSummary(__summary__.code(), __message__)); \
    }                                                                         \
  } while (0)

#define PADDLE_ENFORCE_EQ(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, ==, !=, __VA_ARGS__)
#define PADDLE_ENFORCE_NE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, !=, ==, __VA_ARGS__)
#define PADDLE_ENFORCE_GT(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >, <=, __VA_ARGS__)
#define PADDLE_ENFORCE_GE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >=, <, __VA_ARGS__)
#define PADDLE_ENFORCE_LT(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__)
#define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__)
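
// Illustrative usage of the comparison macros (a sketch; names are examples):
//   PADDLE_ENFORCE_GE(axis, 0,
//                     platform::errors::InvalidArgument(
//                         "Expected axis >= 0, but received %d.", axis));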

/** EXTENDED TOOL FUNCTIONS WITH CHECKING **/

/*
 * Summary: This macro is used to get the Variable or internal type
 *   data (such as LoDTensor or SelectedRows) of the Input and
 *   Output in an op, generally used when calling scope.FindVar(Input/
 *   Output("Name")) or ctx.Input<LoDTensor>().
 *   First, this macro checks whether the obtained pointer is null,
 *   and then returns the data if it is not null.
 *
 * Note: This macro is only suitable for specific scenarios and
 *   is not intended to be widely used. If it cannot meet the
 *   requirements, please use the other PADDLE_ENFORCE** check macros.
 *
 * Parameters:
 *     __PTR: pointer
 *     __ROLE: (string), Input or Output
 *     __NAME: (string), Input or Output name
 *     __OP_TYPE: (string), the op type
 *
 * Return: The data pointed to by the pointer.
 *
 * Examples:
 *    GET_DATA_SAFELY(ctx.Input<LoDTensor>("X"), "Input", "X", "Mul");
 */
#define GET_DATA_SAFELY(__PTR, __ROLE, __NAME, __OP_TYPE)                     \
  (([&]() -> std::add_lvalue_reference<decltype(*(__PTR))>::type {            \
    auto* __ptr = (__PTR);                                                    \
    if (UNLIKELY(nullptr == __ptr)) {                                         \
      auto __summary__ = paddle::platform::errors::NotFound(                  \
          "Unable to get %s data of %s %s in operator %s. "                   \
          "Possible reasons are:\n"                                           \
          "  1. The %s is not the %s of operator %s;\n"                       \
          "  2. The %s has no corresponding variable passed in;\n"            \
          "  3. The %s corresponding variable is not initialized.",           \
          paddle::platform::demangle(                                         \
              typeid(std::add_lvalue_reference<decltype(*__ptr)>::type)       \
                  .name()),                                                   \
          __ROLE, __NAME, __OP_TYPE, __NAME, __ROLE, __OP_TYPE, __NAME,       \
          __NAME);                                                            \
      auto __message__ = ::paddle::string::Sprintf(                           \
          "%s\n  [Hint: pointer " #__PTR " should not be null.]",             \
          __summary__.error_message());                                       \
      __THROW_ERROR_INTERNAL__(                                               \
          ::paddle::platform::ErrorSummary(__summary__.code(), __message__)); \
    }                                                                         \
    return *__ptr;                                                            \
530 531
  })())

532 533 534 535 536 537 538 539 540 541 542 543 544
/*
 * Summary: This macro is used to check whether op has specified
 * Input or Output Variables. Because op's Input and Output
 * checking are written similarly, so abstract this macro.
 *
 * Parameters:
 *     __EXPR: (bool), the bool expression
 *     __ROLE: (string), Input or Output
 *     __NAME: (string), Input or Output name
 *     __OP_TYPE: (string), the op type
 *
 * Examples:
 *    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Mul");
545
 */
546 547 548 549 550 551 552
#define OP_INOUT_CHECK(__EXPR, __ROLE, __NAME, __OP_TYPE)                   \
  do {                                                                      \
    PADDLE_ENFORCE_EQ(__EXPR, true, paddle::platform::errors::NotFound(     \
                                        "No %s(%s) found for %s operator.", \
                                        __ROLE, __NAME, __OP_TYPE));        \
  } while (0)

/*
 * Summary: The BOOST_GET(_**) series of macros are used to call boost::get
 *   safely. boost::get is not a completely safe API; although it works
 *   in most cases, in extreme cases it may fail and directly throw a
 *   boost::bad_get exception without any stack information.
 *   This kind of problem is difficult to debug, so these macros were added
 *   to enrich the boost::get error information. At the same time, we
 *   restrict the direct use of boost::get by a CI rule.
 *
 * Parameters:
 *     __TYPE: the target variable type
 *     __VALUE: the target variable to get
 *
 * Examples:
 *     - unsafe writing: int x = boost::get<int>(y);
 *     - safe writing: int x = BOOST_GET(int, y);
 *
 * Note: GCC 4.8 cannot select the right overloaded function here, so we
 *    need to define different functions and macros; after we upgrade the
 *    CI gcc version, we can define a single BOOST_GET macro.
 */
574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
namespace details {

#define DEFINE_SAFE_BOOST_GET(__InputType, __OutputType, __OutputTypePtr,      \
                              __FuncName)                                      \
  template <typename OutputType, typename InputType>                           \
  auto __FuncName(__InputType input, const char* expression, const char* file, \
                  int line)                                                    \
      ->typename std::conditional<std::is_pointer<InputType>::value,           \
                                  __OutputTypePtr, __OutputType>::type {       \
    try {                                                                      \
      return boost::get<OutputType>(input);                                    \
    } catch (boost::bad_get&) {                                                \
      HANDLE_THE_ERROR                                                         \
      throw ::paddle::platform::EnforceNotMet(                                 \
          ::paddle::platform::errors::InvalidArgument(                         \
              "boost::get failed, cannot get value "                           \
              "(%s) by type %s, its type is %s.",                              \
              expression,                                                      \
              paddle::platform::demangle(typeid(OutputType).name()),           \
              paddle::platform::demangle(input.type().name())),                \
          file, line);                                                         \
      END_HANDLE_THE_ERROR                                                     \
    }                                                                          \
  }

DEFINE_SAFE_BOOST_GET(InputType&, OutputType&, OutputType*, SafeBoostGet);
DEFINE_SAFE_BOOST_GET(const InputType&, const OutputType&, const OutputType*,
                      SafeBoostGetConst);
DEFINE_SAFE_BOOST_GET(InputType&&, OutputType, OutputType*,
                      SafeBoostGetMutable);

}  // namespace details

#define BOOST_GET(__TYPE, __VALUE)                                     \
  ::paddle::platform::details::SafeBoostGet<__TYPE>(__VALUE, #__VALUE, \
                                                    __FILE__, __LINE__)
#define BOOST_GET_CONST(__TYPE, __VALUE)                                    \
  ::paddle::platform::details::SafeBoostGetConst<__TYPE>(__VALUE, #__VALUE, \
                                                         __FILE__, __LINE__)
#define BOOST_GET_MUTABLE(__TYPE, __VALUE)                                    \
  ::paddle::platform::details::SafeBoostGetMutable<__TYPE>(__VALUE, #__VALUE, \
                                                           __FILE__, __LINE__)

/** OTHER EXCEPTION AND ENFORCE **/

struct EOFException : public std::exception {
  std::string err_str_;
  EOFException(const char* err_msg, const char* file, int line) {
    err_str_ = string::Sprintf("%s at [%s:%d]", err_msg, file, line);
  }

  const char* what() const noexcept override { return err_str_.c_str(); }
};

#define PADDLE_THROW_EOF()                                                     \
  do {                                                                         \
    HANDLE_THE_ERROR                                                           \
    throw ::paddle::platform::EOFException("There is no next data.", __FILE__, \
                                           __LINE__);                          \
    END_HANDLE_THE_ERROR                                                       \
  } while (0)

#define PADDLE_THROW_BAD_ALLOC(...)                                          \
  do {                                                                       \
    HANDLE_THE_ERROR                                                         \
    throw ::paddle::memory::allocation::BadAlloc(                            \
        ::paddle::platform::ErrorSummary(__VA_ARGS__).to_string(), __FILE__, \
        __LINE__);                                                           \
    END_HANDLE_THE_ERROR                                                     \
  } while (0)

/** CUDA PADDLE ENFORCE FUNCTIONS AND MACROS **/
#ifdef PADDLE_WITH_CUDA

/***** CUDA ERROR *****/
inline bool is_error(cudaError_t e) { return e != cudaSuccess; }

inline std::string GetCudaErrorWebsite(int32_t cuda_version) {
  std::ostringstream webstr;
  webstr << "https://docs.nvidia.com/cuda/";
  if (cuda_version != -1) {
    double version = cuda_version / 10;
    webstr << "archive/" << std::fixed << std::setprecision(1) << version;
  }
  webstr << "/cuda-runtime-api/group__CUDART__TYPES.html"
            "#group__CUDART__TYPES_1g3f51e3575c2178246db0a94a430e0038";
  return webstr.str();
}

inline std::string build_nvidia_error_msg(cudaError_t e) {
#if CUDA_VERSION >= 10000 && CUDA_VERSION < 11000
  int32_t cuda_version = 100;
#elif CUDA_VERSION >= 9000
  int32_t cuda_version = 90;
#else
  int32_t cuda_version = -1;
#endif
  std::ostringstream sout;
  sout << " Cuda error(" << e << "), " << cudaGetErrorString(e) << ".";
  static platform::proto::cudaerrorDesc cudaerror;
  static bool _initSucceed = false;
  if (cudaerror.ByteSizeLong() == 0) {
    std::string filePath;
#if !defined(_WIN32)
    Dl_info info;
    if (dladdr(reinterpret_cast<void*>(GetCudaErrorWebsite), &info)) {
      std::string strModule(info.dli_fname);
      const size_t last_slash_idx = strModule.find_last_of("/");
      std::string compare_path = strModule.substr(strModule.length() - 6);
      if (std::string::npos != last_slash_idx) {
        strModule.erase(last_slash_idx, std::string::npos);
      }
      if (compare_path.compare("avx.so") == 0) {
        filePath = strModule +
                   "/../include/third_party/cudaerror/data/cudaErrorMessage.pb";
      } else {
        filePath =
            strModule + "/../../third_party/cudaerror/data/cudaErrorMessage.pb";
      }
    }
#else
    char buf[100];
    MEMORY_BASIC_INFORMATION mbi;
    HMODULE h_module =
        (::VirtualQuery(GetCudaErrorWebsite, &mbi, sizeof(mbi)) != 0)
            ? (HMODULE)mbi.AllocationBase
            : NULL;
    GetModuleFileName(h_module, buf, 100);
    std::string strModule(buf);
    const size_t last_slash_idx = strModule.find_last_of("\\");
    std::string compare_path = strModule.substr(strModule.length() - 7);
    if (std::string::npos != last_slash_idx) {
      strModule.erase(last_slash_idx, std::string::npos);
    }
    if (compare_path.compare("avx.pyd") == 0) {
      filePath =
          strModule +
          "\\..\\include\\third_party\\cudaerror\\data\\cudaErrorMessage.pb";
    } else {
      filePath =
          strModule + "\\..\\third_party\\cudaerror\\data\\cudaErrorMessage.pb";
    }
#endif
    std::ifstream fin(filePath, std::ios::in | std::ios::binary);
    _initSucceed = cudaerror.ParseFromIstream(&fin);
  }
  if (_initSucceed) {
    for (int i = 0; i < cudaerror.allmessages_size(); ++i) {
      if (cuda_version == cudaerror.allmessages(i).version()) {
        for (int j = 0; j < cudaerror.allmessages(i).messages_size(); ++j) {
          if (e == cudaerror.allmessages(i).messages(j).errorcode()) {
            sout << "\n  [Advise: "
                 << cudaerror.allmessages(i).messages(j).errormessage() << "]";
            return sout.str();
          }
        }
      }
    }
  }
  sout << "\n  [Advise: Please search for the error code(" << e
       << ") on website( " << GetCudaErrorWebsite(cuda_version)
       << " ) to get Nvidia's official solution about CUDA Error.]";
  return sout.str();
}

/** curand ERROR **/
inline bool is_error(curandStatus_t stat) {
  return stat != CURAND_STATUS_SUCCESS;
}

inline const char* curandGetErrorString(curandStatus_t stat) {
  switch (stat) {
    case CURAND_STATUS_SUCCESS:
      return "`CURAND_STATUS_SUCCESS`. No errors.";
    case CURAND_STATUS_VERSION_MISMATCH:
      return "`CURAND_STATUS_VERSION_MISMATCH`. Header file and linked library "
             "version do not match.";
    case CURAND_STATUS_NOT_INITIALIZED:
      return "`CURAND_STATUS_NOT_INITIALIZED`. Generator not initialized.";
    case CURAND_STATUS_ALLOCATION_FAILED:
      return "`CURAND_STATUS_ALLOCATION_FAILED`. Memory allocation failed.";
    case CURAND_STATUS_TYPE_ERROR:
      return "`CURAND_STATUS_TYPE_ERROR`. Generator is wrong type.";
    case CURAND_STATUS_OUT_OF_RANGE:
      return "`CURAND_STATUS_OUT_OF_RANGE`. Argument out of range.";
    case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
      return "`CURAND_STATUS_LENGTH_NOT_MULTIPLE`. Length requested is not a "
             "multiple of dimension.";
    case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
      return "`CURAND_STATUS_DOUBLE_PRECISION_REQUIRED`. GPU does not have "
             "double precision required by MRG32k3a.";
    case CURAND_STATUS_LAUNCH_FAILURE:
      return "`CURAND_STATUS_LAUNCH_FAILURE`. Kernel launch failure.";
    case CURAND_STATUS_PREEXISTING_FAILURE:
      return "`CURAND_STATUS_PREEXISTING_FAILURE`. Preexisting failure on "
             "library entry.";
    case CURAND_STATUS_INITIALIZATION_FAILED:
      return "`CURAND_STATUS_INITIALIZATION_FAILED`. Initialization of CUDA "
             "failed.";
    case CURAND_STATUS_ARCH_MISMATCH:
      return "`CURAND_STATUS_ARCH_MISMATCH`. Architecture mismatch, GPU does "
             "not support requested feature.";
    case CURAND_STATUS_INTERNAL_ERROR:
      return "`CURAND_STATUS_INTERNAL_ERROR`. Internal library error.";
    default:
      return "Unknown curand status";
  }
}

inline std::string build_nvidia_error_msg(curandStatus_t stat) {
  std::string msg(" Curand error, ");
  return msg + curandGetErrorString(stat) + " ";
}

/***** CUDNN ERROR *****/
inline bool is_error(cudnnStatus_t stat) {
  return stat != CUDNN_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cudnnStatus_t stat) {
  std::string msg(" Cudnn error, ");
  return msg + platform::dynload::cudnnGetErrorString(stat) + " ";
}

/***** CUBLAS ERROR *****/
inline bool is_error(cublasStatus_t stat) {
  return stat != CUBLAS_STATUS_SUCCESS;
}

inline const char* cublasGetErrorString(cublasStatus_t stat) {
  switch (stat) {
    case CUBLAS_STATUS_NOT_INITIALIZED:
      return "`CUBLAS_STATUS_NOT_INITIALIZED`. The cuBLAS library was not "
             "initialized.";
    case CUBLAS_STATUS_ALLOC_FAILED:
      return "`CUBLAS_STATUS_ALLOC_FAILED`. Resource allocation failed inside "
             "the cuBLAS library.";
    case CUBLAS_STATUS_INVALID_VALUE:
      return "`CUBLAS_STATUS_INVALID_VALUE`. An unsupported value or parameter "
             "was passed to the function (a negative vector size, for "
             "example).";
    case CUBLAS_STATUS_ARCH_MISMATCH:
      return "`CUBLAS_STATUS_ARCH_MISMATCH`. The function requires a feature "
             "absent from the device architecture; usually caused by the lack "
             "of support for double precision.";
    case CUBLAS_STATUS_MAPPING_ERROR:
      return "`CUBLAS_STATUS_MAPPING_ERROR`. An access to GPU memory space "
             "failed, which is usually caused by a failure to bind a texture.";
    case CUBLAS_STATUS_EXECUTION_FAILED:
      return "`CUBLAS_STATUS_EXECUTION_FAILED`. The GPU program failed to "
             "execute. This is often caused by a launch failure of the kernel "
             "on the GPU, which can be caused by multiple reasons.";
    case CUBLAS_STATUS_INTERNAL_ERROR:
      return "`CUBLAS_STATUS_INTERNAL_ERROR`. An internal cuBLAS operation "
             "failed. This error is usually caused by a cudaMemcpyAsync() "
             "failure.";
    case CUBLAS_STATUS_NOT_SUPPORTED:
      return "`CUBLAS_STATUS_NOT_SUPPORTED`. The functionality requested is "
             "not supported.";
    case CUBLAS_STATUS_LICENSE_ERROR:
      return "`CUBLAS_STATUS_LICENSE_ERROR`. The functionality requested "
             "requires some license and an error was detected when trying to "
             "check the current licensing.";
    default:
      return "Unknown cublas status";
  }
}

inline std::string build_nvidia_error_msg(cublasStatus_t stat) {
  std::string msg(" Cublas error, ");
  return msg + cublasGetErrorString(stat) + " ";
}

/***** CUSOLVER ERROR *****/
inline bool is_error(cusolverStatus_t stat) {
  return stat != CUSOLVER_STATUS_SUCCESS;
}

inline const char* cusolverGetErrorString(cusolverStatus_t stat) {
  switch (stat) {
    case CUSOLVER_STATUS_NOT_INITIALIZED:
      return "`CUSOLVER_STATUS_NOT_INITIALIZED`. The cuSolver library was not "
             "initialized. This is usually caused by the lack of a prior call, "
             "an error in the CUDA Runtime API called by the cuSolver routine, "
             "or an error in the hardware setup.";
    case CUSOLVER_STATUS_ALLOC_FAILED:
      return "`CUSOLVER_STATUS_ALLOC_FAILED`. Resource allocation failed "
             "inside the cuSolver library. This is usually caused by a "
             "cudaMalloc() failure.";
    case CUSOLVER_STATUS_INVALID_VALUE:
      return "`CUSOLVER_STATUS_INVALID_VALUE`. An unsupported value or "
             "parameter was passed to the function (a negative vector size, "
             "for example).";
    case CUSOLVER_STATUS_ARCH_MISMATCH:
      return "`CUSOLVER_STATUS_ARCH_MISMATCH`. The function requires a feature "
             "absent from the device architecture; usually caused by the lack "
             "of support for atomic operations or double precision.";
    case CUSOLVER_STATUS_EXECUTION_FAILED:
      return "`CUSOLVER_STATUS_EXECUTION_FAILED`. The GPU program failed to "
             "execute. This is often caused by a launch failure of the kernel "
             "on the GPU, which can be caused by multiple reasons.";
    case CUSOLVER_STATUS_INTERNAL_ERROR:
      return "`CUSOLVER_STATUS_INTERNAL_ERROR`. An internal cuSolver operation "
             "failed. This error is usually caused by a cudaMemcpyAsync() "
             "failure.";
    case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
      return "`CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED`. The matrix type is "
             "not supported by this function. This is usually caused by "
             "passing an invalid matrix descriptor to the function.";
    default:
      return "Unknown cusolver status";
  }
}

inline std::string build_nvidia_error_msg(cusolverStatus_t stat) {
  std::string msg(" Cusolver error, ");
  return msg + cusolverGetErrorString(stat) + " ";
}

/****** NCCL ERROR ******/
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
inline bool is_error(ncclResult_t nccl_result) {
  return nccl_result != ncclSuccess;
}

inline std::string build_nvidia_error_msg(ncclResult_t nccl_result) {
  std::string msg(" Nccl error, ");
  if (errno == ENOSPC || errno == EAGAIN) {
    std::string detail(strerror(errno));
    detail += "\nPlease try one of the following solutions:";
    detail += "\n1. export NCCL_SHM_DISABLE=1;";
    detail += "\n2. export NCCL_P2P_LEVEL=SYS;";
    detail +=
        "\n3. Increase shared memory by setting the --shm-size "
        "option when starting docker container, e.g., setting "
        "--shm-size=2g.\n";
    return msg + platform::dynload::ncclGetErrorString(nccl_result) +
           ", detail: " + detail + " ";
  }
  return msg + platform::dynload::ncclGetErrorString(nccl_result) + " ";
}
#endif  // not(__APPLE__) and PADDLE_WITH_NCCL

namespace details {

template <typename T>
struct CudaStatusType {};

#define DEFINE_CUDA_STATUS_TYPE(type, success_value) \
  template <>                                        \
  struct CudaStatusType<type> {                      \
    using Type = type;                               \
    static constexpr Type kSuccess = success_value;  \
  }

DEFINE_CUDA_STATUS_TYPE(cudaError_t, cudaSuccess);
DEFINE_CUDA_STATUS_TYPE(curandStatus_t, CURAND_STATUS_SUCCESS);
DEFINE_CUDA_STATUS_TYPE(cudnnStatus_t, CUDNN_STATUS_SUCCESS);
DEFINE_CUDA_STATUS_TYPE(cublasStatus_t, CUBLAS_STATUS_SUCCESS);
DEFINE_CUDA_STATUS_TYPE(cusolverStatus_t, CUSOLVER_STATUS_SUCCESS);

#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess);
#endif

}  // namespace details

#define PADDLE_ENFORCE_CUDA_SUCCESS(COND)                        \
  do {                                                           \
    auto __cond__ = (COND);                                      \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);             \
    constexpr auto __success_type__ =                            \
        ::paddle::platform::details::CudaStatusType<             \
            __CUDA_STATUS_TYPE__>::kSuccess;                     \
    if (UNLIKELY(__cond__ != __success_type__)) {                \
      auto __summary__ = ::paddle::platform::errors::External(   \
          ::paddle::platform::build_nvidia_error_msg(__cond__)); \
      __THROW_ERROR_INTERNAL__(__summary__);                     \
    }                                                            \
  } while (0)
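
// Illustrative usage (a sketch; any CUDA/cuDNN/cuBLAS/NCCL status-returning
// call can be checked this way):
//   PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));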

#define PADDLE_RETRY_CUDA_SUCCESS(COND)                                 \
  do {                                                                  \
    auto __cond__ = (COND);                                             \
    int retry_count = 1;                                                \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);                    \
    constexpr auto __success_type__ =                                   \
        ::paddle::platform::details::CudaStatusType<                    \
            __CUDA_STATUS_TYPE__>::kSuccess;                            \
    while (UNLIKELY(__cond__ != __success_type__) && retry_count < 5) { \
      __cond__ = (COND);                                                \
      ++retry_count;                                                    \
    }                                                                   \
    if (UNLIKELY(__cond__ != __success_type__)) {                       \
      auto __summary__ = ::paddle::platform::errors::External(          \
          ::paddle::platform::build_nvidia_error_msg(__cond__));        \
      __THROW_ERROR_INTERNAL__(__summary__);                            \
    }                                                                   \
  } while (0)
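
// Illustrative usage (a sketch): retry a call that may fail transiently, such
// as an allocation, for up to 5 attempts before throwing.
//   PADDLE_RETRY_CUDA_SUCCESS(cudaMalloc(&ptr, num_bytes));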

#undef DEFINE_CUDA_STATUS_TYPE
#endif  // PADDLE_WITH_CUDA

}  // namespace platform
}  // namespace paddle