/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#ifdef __GNUC__
#include <cxxabi.h>  // for __cxa_demangle
#endif               // __GNUC__

#if !defined(_WIN32)
#include <dlfcn.h>   // dladdr
#include <unistd.h>  // sleep, usleep
#else                // _WIN32
#ifndef NOMINMAX
#define NOMINMAX  // msvc max/min macro conflict with std::min/max
#endif
#include <windows.h>  // GetModuleFileName, Sleep
#endif

#ifdef PADDLE_WITH_CUDA
#include <cublas_v2.h>
#include <cudnn.h>
#include <curand.h>
#include <thrust/system/cuda/error.h>
#include <thrust/system_error.h>

#include "paddle/fluid/platform/cuda_error.pb.h"
#endif  // PADDLE_WITH_CUDA

#include <fstream>
#include <iomanip>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>

#if !defined(_WIN32) && !defined(PADDLE_WITH_MUSL)
#include <execinfo.h>
#endif

#define GLOG_NO_ABBREVIATED_SEVERITIES  // msvc conflict logging with windows.h
#include "glog/logging.h"
#include "paddle/fluid/platform/errors.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/port.h"
#include "paddle/fluid/platform/variant.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/fluid/string/to_string.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/dynload/cublas.h"
#include "paddle/fluid/platform/dynload/cudnn.h"
#include "paddle/fluid/platform/dynload/curand.h"
#include "paddle/fluid/platform/dynload/cusolver.h"
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
#include <error.h>
#include "paddle/fluid/platform/dynload/nccl.h"
#endif  // __APPLE__
#endif  // PADDLE_WITH_CUDA

// Note: these headers are included to simplify the demangled type strings
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/imperative/type_defs.h"

namespace paddle {
namespace platform {
class ErrorSummary;
}  // namespace platform
}  // namespace paddle

#ifdef PADDLE_WITH_CUDA
DECLARE_int64(gpu_allocator_retry_time);
#endif
DECLARE_int32(call_stack_level);

namespace paddle {
namespace platform {

/** HELPER MACROS AND FUNCTIONS **/

#ifndef PADDLE_MAY_THROW
#define PADDLE_MAY_THROW noexcept(false)
#endif
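
// Illustrative usage (hypothetical class and member): destructors are
// implicitly noexcept since C++11, so a destructor that may throw
// EnforceNotMet can be annotated as:
//   ~ScopedChecker() PADDLE_MAY_THROW { PADDLE_ENFORCE_EQ(done_, true, ...); }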

// Because most enforce conditions evaluate to true, we can use
// __builtin_expect to instruct the compiler to generate code that
// always favors branch prediction of true.
// This generates faster binary code. __builtin_expect is a GCC/Clang
// builtin; MSVC has no equivalent intrinsic.
// For more details, please check https://stackoverflow.com/a/43870188/724872.
#if !defined(_WIN32)
#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)
#else
// There is no equivalent intrinsic in MSVC.
#define UNLIKELY(condition) (condition)
#endif

#if !defined(_WIN32)
#define LIKELY(condition) __builtin_expect(static_cast<bool>(condition), 1)
#else
// There is no equivalent intrinsic in MSVC.
#define LIKELY(condition) (condition)
#endif
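
// Illustrative usage (hypothetical condition): mark failure branches as cold
// so the compiler lays out the success path first, e.g.
//   if (UNLIKELY(ptr == nullptr)) { /* rare error handling */ }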

#if defined _WIN32 && defined PADDLE_ON_INFERENCE && defined PADDLE_NO_PYTHON
#define HANDLE_THE_ERROR try {
#define END_HANDLE_THE_ERROR            \
  }                                     \
  catch (const std::exception& e) {     \
    std::cout << e.what() << std::endl; \
    throw;                              \
  }
#else
#define HANDLE_THE_ERROR
#define END_HANDLE_THE_ERROR
#endif

#ifdef __GNUC__
inline std::string demangle(std::string name) {
  int status = -4;  // some arbitrary value to eliminate the compiler warning
  std::unique_ptr<char, void (*)(void*)> res{
      abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free};
  return (status == 0) ? res.get() : name;
}
#else
inline std::string demangle(std::string name) { return name; }
#endif
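
// Illustrative behavior (assumes a GCC/Clang toolchain): demangle turns a raw
// typeid name such as "St9exception" back into "std::exception"; if
// demangling fails, or on other compilers, the input is returned unchanged.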

namespace details {
template <typename T>
inline constexpr bool IsArithmetic() {
  return std::is_arithmetic<T>::value;
}

template <typename T1, typename T2, bool kIsArithmetic /* = true */>
struct TypeConverterImpl {
  using Type1 = typename std::common_type<T1, T2>::type;
  using Type2 = Type1;
};

template <typename T1, typename T2>
struct TypeConverterImpl<T1, T2, false> {
  using Type1 = T1;
  using Type2 = T2;
};

template <typename T1, typename T2>
struct TypeConverter {
 private:
  static constexpr bool kIsArithmetic =
      IsArithmetic<T1>() && IsArithmetic<T2>();

 public:
  using Type1 = typename TypeConverterImpl<T1, T2, kIsArithmetic>::Type1;
  using Type2 = typename TypeConverterImpl<T1, T2, kIsArithmetic>::Type2;
};

template <typename T1, typename T2>
using CommonType1 = typename std::add_lvalue_reference<
    typename std::add_const<typename TypeConverter<T1, T2>::Type1>::type>::type;

template <typename T1, typename T2>
using CommonType2 = typename std::add_lvalue_reference<
    typename std::add_const<typename TypeConverter<T1, T2>::Type2>::type>::type;

// Here, we use SFINAE to check whether T can be converted to std::string
template <typename T>
struct CanToString {
 private:
  using YesType = uint8_t;
  using NoType = uint16_t;

  template <typename U>
  static YesType Check(decltype(std::cout << std::declval<U>())) {
    return 0;
  }

  template <typename U>
  static NoType Check(...) {
    return 0;
  }

 public:
  static constexpr bool kValue =
      std::is_same<YesType, decltype(Check<T>(std::cout))>::value;
};

template <bool kCanToString /* = true */>
struct BinaryCompareMessageConverter {
  template <typename T>
  static std::string Convert(const char* expression, const T& value) {
    return expression + std::string(":") + string::to_string(value);
  }
};

template <>
struct BinaryCompareMessageConverter<false> {
  template <typename T>
  static const char* Convert(const char* expression, const T& value) {
    return expression;
  }
};
}  // namespace details

template <typename T>
inline std::string ReplaceComplexTypeStr(std::string str,
                                         const std::string& type_name) {
  auto demangle_type_str = demangle(typeid(T).name());
  size_t start_pos = 0;
  while ((start_pos = str.find(demangle_type_str, start_pos)) !=
         std::string::npos) {
    str.replace(start_pos, demangle_type_str.length(), type_name);
    start_pos += type_name.length();
  }
  return str;
}
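
// Illustrative behavior (hypothetical input): with T = std::string, every
// occurrence of the demangled form "std::__cxx11::basic_string<char, ...>"
// inside `str` is collapsed to the shorter alias "std::string".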

#define __REPLACE_COMPLEX_TYPE_STR__(__TYPENAME, __STR)                       \
  do {                                                                        \
    __STR = paddle::platform::ReplaceComplexTypeStr<__TYPENAME>(__STR,        \
                                                                #__TYPENAME); \
  } while (0)

inline std::string SimplifyDemangleStr(std::string str) {
  // The order is important: complex types must come first, because their
  // demangled strings contain the demangled strings of the simpler types.
  __REPLACE_COMPLEX_TYPE_STR__(paddle::framework::AttributeMap, str);
  __REPLACE_COMPLEX_TYPE_STR__(paddle::framework::Attribute, str);
  __REPLACE_COMPLEX_TYPE_STR__(paddle::imperative::NameVariableWrapperMap, str);
  __REPLACE_COMPLEX_TYPE_STR__(paddle::imperative::NameVarBaseMap, str);
  __REPLACE_COMPLEX_TYPE_STR__(std::string, str);
  return str;
}

inline std::string GetCurrentTraceBackString() {
  std::ostringstream sout;

  sout << "\n\n--------------------------------------\n";
  sout << "C++ Traceback (most recent call last):";
  sout << "\n--------------------------------------\n";
#if !defined(_WIN32) && !defined(PADDLE_WITH_MUSL)
  static constexpr int TRACE_STACK_LIMIT = 100;

  void* call_stack[TRACE_STACK_LIMIT];
  auto size = backtrace(call_stack, TRACE_STACK_LIMIT);
  auto symbols = backtrace_symbols(call_stack, size);
  Dl_info info;
  int idx = 0;
  for (int i = size - 1; i >= 0; --i) {
    if (dladdr(call_stack[i], &info) && info.dli_sname) {
      auto demangled = demangle(info.dli_sname);
      std::string path(info.dli_fname);
      // C++ traceback info comes from core.so
      if (path.substr(path.length() - 3).compare(".so") == 0) {
        sout << string::Sprintf("%-3d %s\n", idx++,
                                SimplifyDemangleStr(demangled));
      }
    }
  }
  free(symbols);
#else
  sout << "Not support stack backtrace yet.\n";
#endif
  return sout.str();
}

template <typename StrType>
inline std::string GetErrorSumaryString(StrType&& what, const char* file,
                                        int line) {
  std::ostringstream sout;
  if (FLAGS_call_stack_level > 1) {
    sout << "\n----------------------\nError Message "
            "Summary:\n----------------------\n";
  }
  sout << string::Sprintf("%s (at %s:%d)", std::forward<StrType>(what), file,
                          line)
       << std::endl;
  return sout.str();
}

template <typename StrType>
inline std::string GetTraceBackString(StrType&& what, const char* file,
                                      int line) {
  if (FLAGS_call_stack_level > 1) {
    // FLAGS_call_stack_level > 1 means showing the C++ call stack
    return GetCurrentTraceBackString() + GetErrorSumaryString(what, file, line);
  } else {
    return GetErrorSumaryString(what, file, line);
  }
}

inline std::string SimplifyErrorTypeFormat(const std::string& str) {
  std::ostringstream sout;
  size_t type_end_pos = str.find(":", 0);
  if (type_end_pos == std::string::npos) {
    sout << str;
  } else {
    // Remove "Error:", add "()""
    sout << "(" << str.substr(0, type_end_pos - 5) << ")"
         << str.substr(type_end_pos + 1);
  }
  return sout.str();
}

inline bool is_error(bool stat) { return !stat; }

// Note: This macro can only be used within enforce.h
#define __THROW_ERROR_INTERNAL__(__ERROR_SUMMARY)                      \
  do {                                                                 \
    HANDLE_THE_ERROR                                                   \
    throw ::paddle::platform::EnforceNotMet(__ERROR_SUMMARY, __FILE__, \
                                            __LINE__);                 \
    END_HANDLE_THE_ERROR                                               \
  } while (0)

/** ENFORCE EXCEPTION AND MACROS **/

struct EnforceNotMet : public std::exception {
 public:
  EnforceNotMet(std::exception_ptr e, const char* file, int line) {
    try {
      std::rethrow_exception(e);
    } catch (platform::EnforceNotMet& e) {
      code_ = e.code();
      err_str_ = GetTraceBackString(e.what(), file, line);
      simple_err_str_ = SimplifyErrorTypeFormat(err_str_);
    } catch (std::exception& e) {
      err_str_ = GetTraceBackString(e.what(), file, line);
      simple_err_str_ = SimplifyErrorTypeFormat(err_str_);
    }
  }

  EnforceNotMet(const std::string& str, const char* file, int line)
      : err_str_(GetTraceBackString(str, file, line)) {
    simple_err_str_ = SimplifyErrorTypeFormat(err_str_);
  }

  EnforceNotMet(const ErrorSummary& error, const char* file, int line)
      : code_(error.code()),
        err_str_(GetTraceBackString(error.to_string(), file, line)) {
    simple_err_str_ = SimplifyErrorTypeFormat(err_str_);
  }

  const char* what() const noexcept override {
    if (FLAGS_call_stack_level > 1) {
      return err_str_.c_str();
    } else {
      return simple_err_str_.c_str();
    }
  }

  error::Code code() const { return code_; }

  const std::string& error_str() const { return err_str_; }

  const std::string& simple_error_str() const { return simple_err_str_; }

  void set_error_str(std::string str) {
    if (FLAGS_call_stack_level > 1) {
      err_str_ = str;
    } else {
      simple_err_str_ = str;
    }
  }

 private:
  // Used to determine the final type of exception thrown
  error::Code code_ = error::LEGACY;
  // Complete error message
  // e.g. InvalidArgumentError: ***
  std::string err_str_;
  // Simplified error message, used when neither the C++ stack nor the Python
  // compile stack is shown
  // e.g. (InvalidArgument) ***
  std::string simple_err_str_;
};

#define PADDLE_THROW(...)                                                   \
  do {                                                                      \
    HANDLE_THE_ERROR                                                        \
    throw ::paddle::platform::EnforceNotMet(                                \
        ::paddle::platform::ErrorSummary(__VA_ARGS__), __FILE__, __LINE__); \
    END_HANDLE_THE_ERROR                                                    \
  } while (0)
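
// Illustrative usage (hypothetical message and op_type variable):
//   PADDLE_THROW(platform::errors::Unimplemented(
//       "Operator %s does not support CPU execution.", op_type));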

#if defined(__CUDA_ARCH__)
// For CUDA, the assertions can affect performance and it is therefore
// recommended to disable them in production code
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#assertion
#define PADDLE_ENFORCE(_IS_NOT_ERROR, __FORMAT, ...)                         \
  do {                                                                       \
    if (!(_IS_NOT_ERROR)) {                                                  \
      printf("Error: %s:%d Assertion `%s` failed. " __FORMAT "\n", __FILE__, \
             __LINE__, #_IS_NOT_ERROR, ##__VA_ARGS__);                       \
      asm("trap;");                                                          \
    }                                                                        \
  } while (0)
#else
#define PADDLE_ENFORCE(COND, ...)                                              \
  do {                                                                         \
    auto __cond__ = (COND);                                                    \
    if (UNLIKELY(::paddle::platform::is_error(__cond__))) {                    \
      __THROW_ERROR_INTERNAL__(::paddle::platform::ErrorSummary(__VA_ARGS__)); \
    }                                                                          \
  } while (0)
#endif

/*
 * Some enforce helpers here, usage:
 *    int a = 1;
 *    int b = 2;
 *    PADDLE_ENFORCE_EQ(a, b);
 *
 *    will raise an exception with a message like:
 *    "Expected input a == b, but received a(1) != b(2)."
 *    together with detailed stack information.
 *
 *    Extra messages are also supported, for example:
 *    PADDLE_ENFORCE(a == b, "some simple enforce failed between %d numbers", 2)
 */

#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...)                                   \
  do {                                                                        \
    if (UNLIKELY(nullptr == (__VAL))) {                                       \
      auto __summary__ = ::paddle::platform::ErrorSummary(__VA_ARGS__);       \
      auto __message__ = ::paddle::string::Sprintf(                           \
          "%s\n  [Hint: " #__VAL " should not be null.]",                     \
          __summary__.error_message());                                       \
      __THROW_ERROR_INTERNAL__(                                               \
          ::paddle::platform::ErrorSummary(__summary__.code(), __message__)); \
    }                                                                         \
  } while (0)
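
// Illustrative usage (hypothetical variable and name):
//   PADDLE_ENFORCE_NOT_NULL(
//       var, platform::errors::NotFound("Variable %s is not found.", name));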

#define __PADDLE_BINARY_COMPARE(__VAL1, __VAL2, __CMP, __INV_CMP, ...)        \
  do {                                                                        \
    auto __val1 = (__VAL1);                                                   \
    auto __val2 = (__VAL2);                                                   \
    using __TYPE1__ = decltype(__val1);                                       \
    using __TYPE2__ = decltype(__val2);                                       \
    using __COMMON_TYPE1__ =                                                  \
        ::paddle::platform::details::CommonType1<__TYPE1__, __TYPE2__>;       \
    using __COMMON_TYPE2__ =                                                  \
        ::paddle::platform::details::CommonType2<__TYPE1__, __TYPE2__>;       \
    bool __is_not_error = (static_cast<__COMMON_TYPE1__>(__val1))__CMP(       \
        static_cast<__COMMON_TYPE2__>(__val2));                               \
    if (UNLIKELY(!__is_not_error)) {                                          \
      auto __summary__ = ::paddle::platform::ErrorSummary(__VA_ARGS__);       \
      constexpr bool __kCanToString__ =                                       \
          ::paddle::platform::details::CanToString<__TYPE1__>::kValue &&      \
          ::paddle::platform::details::CanToString<__TYPE2__>::kValue;        \
      auto __message__ = ::paddle::string::Sprintf(                           \
          "%s\n  [Hint: Expected %s " #__CMP                                  \
          " %s, but received %s " #__INV_CMP " %s.]",                         \
          __summary__.error_message(), #__VAL1, #__VAL2,                      \
          ::paddle::platform::details::BinaryCompareMessageConverter<         \
              __kCanToString__>::Convert(#__VAL1, __val1),                    \
          ::paddle::platform::details::BinaryCompareMessageConverter<         \
              __kCanToString__>::Convert(#__VAL2, __val2));                   \
      __THROW_ERROR_INTERNAL__(                                               \
          ::paddle::platform::ErrorSummary(__summary__.code(), __message__)); \
    }                                                                         \
  } while (0)

#define PADDLE_ENFORCE_EQ(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, ==, !=, __VA_ARGS__)
#define PADDLE_ENFORCE_NE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, !=, ==, __VA_ARGS__)
#define PADDLE_ENFORCE_GT(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >, <=, __VA_ARGS__)
#define PADDLE_ENFORCE_GE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >=, <, __VA_ARGS__)
#define PADDLE_ENFORCE_LT(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__)
#define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__)
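
// Illustrative usage (hypothetical shape check):
//   PADDLE_ENFORCE_EQ(x_dims.size(), 2UL,
//                     platform::errors::InvalidArgument(
//                         "The rank of Input(X) must be 2, but received %d.",
//                         x_dims.size()));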

/** EXTENDED TOOL FUNCTIONS WITH CHECKING **/

/*
 * Summary: This macro is used to get the Variable or internal type
 *   data (such as LoDTensor or SelectedRows) of an op's Input and
 *   Output, generally when calling scope.FindVar(Input/
 *   Output("Name")) or ctx.Input<LoDTensor>().
 *   The macro first checks whether the obtained pointer is null,
 *   and then returns the data if it is not.
 *
 * Note: This macro is only suitable for specific scenarios and
 *   is not intended to be widely used. If it cannot meet your
 *   requirements, please use another PADDLE_ENFORCE** check macro.
 *
 * Parameters:
 *     __PTR: pointer
 *     __ROLE: (string), Input or Output
 *     __NAME: (string), Input or Output name
 *     __OP_TYPE: (string), the op type
 *
 * Return: The data pointed to by the pointer.
 *
 * Examples:
 *    GET_DATA_SAFELY(ctx.Input<LoDTensor>("X"), "Input", "X", "Mul");
 */
#define GET_DATA_SAFELY(__PTR, __ROLE, __NAME, __OP_TYPE)                     \
  (([&]() -> std::add_lvalue_reference<decltype(*(__PTR))>::type {            \
    auto* __ptr = (__PTR);                                                    \
    if (UNLIKELY(nullptr == __ptr)) {                                         \
      auto __summary__ = paddle::platform::errors::NotFound(                  \
          "Unable to get %s data of %s %s in operator %s. "                   \
          "Possible reasons are:\n"                                           \
          "  1. The %s is not the %s of operator %s;\n"                       \
          "  2. The %s has no corresponding variable passed in;\n"            \
          "  3. The %s corresponding variable is not initialized.",           \
          paddle::platform::demangle(                                         \
              typeid(std::add_lvalue_reference<decltype(*__ptr)>::type)       \
                  .name()),                                                   \
          __ROLE, __NAME, __OP_TYPE, __NAME, __ROLE, __OP_TYPE, __NAME,       \
          __NAME);                                                            \
      auto __message__ = ::paddle::string::Sprintf(                           \
          "%s\n  [Hint: pointer " #__PTR " should not be null.]",             \
          __summary__.error_message());                                       \
      __THROW_ERROR_INTERNAL__(                                               \
          ::paddle::platform::ErrorSummary(__summary__.code(), __message__)); \
    }                                                                         \
    return *__ptr;                                                            \
  })())
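
// Design note: the immediately-invoked lambda makes the macro usable as an
// expression yielding an lvalue reference, e.g. (hypothetical op context):
//   auto& x = GET_DATA_SAFELY(ctx.Input<LoDTensor>("X"), "Input", "X", "Mul");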

/*
 * Summary: This macro is used to check whether an op has the specified
 * Input or Output Variables. Because an op's Input and Output
 * checks are written similarly, this macro abstracts them.
 *
 * Parameters:
 *     __EXPR: (bool), the bool expression
 *     __ROLE: (string), Input or Output
 *     __NAME: (string), Input or Output name
 *     __OP_TYPE: (string), the op type
 *
 * Examples:
 *    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Mul");
 */
#define OP_INOUT_CHECK(__EXPR, __ROLE, __NAME, __OP_TYPE)                   \
  do {                                                                      \
    PADDLE_ENFORCE_EQ(__EXPR, true, paddle::platform::errors::NotFound(     \
                                        "No %s(%s) found for %s operator.", \
                                        __ROLE, __NAME, __OP_TYPE));        \
  } while (0)

/*
 * Summary: This BOOST_GET(_**) series of macros is used to call boost::get
 *   safely. boost::get is not a completely safe API: although it does not
 *   go wrong in most cases, in extreme cases it may fail and directly
 *   throw a boost::bad_get exception, without any stack information.
 *   This kind of problem is difficult to debug, so these macros are added
 *   to enrich boost::get's error information. At the same time, we restrict
 *   the direct use of boost::get by CI rule.
 *
 * Parameters:
 *     __TYPE: the target variable type
 *     __VALUE: the target variable to get
 *
 * Examples:
 *     - unsafe writing: int x = boost::get<int>(y);
 *     - safe writing: int x = BOOST_GET(int, y);
 *
 * Note: GCC 4.8 cannot select the right overloaded function here, so we
 *    define several functions and macros; after the CI gcc version is
 *    upgraded, a single BOOST_GET macro will suffice.
 */
namespace details {

#define DEFINE_SAFE_BOOST_GET(__InputType, __OutputType, __OutputTypePtr,      \
                              __FuncName)                                      \
  template <typename OutputType, typename InputType>                           \
  auto __FuncName(__InputType input, const char* expression, const char* file, \
                  int line)                                                    \
      ->typename std::conditional<std::is_pointer<InputType>::value,           \
                                  __OutputTypePtr, __OutputType>::type {       \
    try {                                                                      \
      return boost::get<OutputType>(input);                                    \
    } catch (boost::bad_get&) {                                                \
      HANDLE_THE_ERROR                                                         \
      throw ::paddle::platform::EnforceNotMet(                                 \
          ::paddle::platform::errors::InvalidArgument(                         \
              "boost::get failed, cannot get value "                           \
              "(%s) by type %s, its type is %s.",                              \
              expression,                                                      \
              paddle::platform::demangle(typeid(OutputType).name()),           \
              paddle::platform::demangle(input.type().name())),                \
          file, line);                                                         \
      END_HANDLE_THE_ERROR                                                     \
    }                                                                          \
  }

DEFINE_SAFE_BOOST_GET(InputType&, OutputType&, OutputType*, SafeBoostGet);
DEFINE_SAFE_BOOST_GET(const InputType&, const OutputType&, const OutputType*,
                      SafeBoostGetConst);
DEFINE_SAFE_BOOST_GET(InputType&&, OutputType, OutputType*,
                      SafeBoostGetMutable);

}  // namespace details

#define BOOST_GET(__TYPE, __VALUE)                                     \
  ::paddle::platform::details::SafeBoostGet<__TYPE>(__VALUE, #__VALUE, \
                                                    __FILE__, __LINE__)
#define BOOST_GET_CONST(__TYPE, __VALUE)                                    \
  ::paddle::platform::details::SafeBoostGetConst<__TYPE>(__VALUE, #__VALUE, \
                                                         __FILE__, __LINE__)
#define BOOST_GET_MUTABLE(__TYPE, __VALUE)                                    \
  ::paddle::platform::details::SafeBoostGetMutable<__TYPE>(__VALUE, #__VALUE, \
                                                           __FILE__, __LINE__)

/** OTHER EXCEPTION AND ENFORCE **/

struct EOFException : public std::exception {
  std::string err_str_;
  EOFException(const char* err_msg, const char* file, int line) {
    err_str_ = string::Sprintf("%s at [%s:%d]", err_msg, file, line);
  }

  const char* what() const noexcept override { return err_str_.c_str(); }
};

#define PADDLE_THROW_EOF()                                                     \
  do {                                                                         \
    HANDLE_THE_ERROR                                                           \
    throw ::paddle::platform::EOFException("There is no next data.", __FILE__, \
                                           __LINE__);                          \
    END_HANDLE_THE_ERROR                                                       \
  } while (0)

#define PADDLE_THROW_BAD_ALLOC(...)                                          \
  do {                                                                       \
    HANDLE_THE_ERROR                                                         \
    throw ::paddle::memory::allocation::BadAlloc(                            \
        ::paddle::platform::ErrorSummary(__VA_ARGS__).to_string(), __FILE__, \
        __LINE__);                                                           \
    END_HANDLE_THE_ERROR                                                     \
  } while (0)

/** CUDA PADDLE ENFORCE FUNCTIONS AND MACROS **/
#ifdef PADDLE_WITH_CUDA

/***** CUDA ERROR *****/
inline bool is_error(cudaError_t e) { return e != cudaSuccess; }

inline std::string GetCudaErrorWebsite(int32_t cuda_version) {
  std::ostringstream webstr;
  webstr << "https://docs.nvidia.com/cuda/";
  if (cuda_version != -1) {
    double version = cuda_version / 10.0;
    webstr << "archive/" << std::fixed << std::setprecision(1) << version;
  }
  webstr << "/cuda-runtime-api/group__CUDART__TYPES.html"
            "#group__CUDART__TYPES_1g3f51e3575c2178246db0a94a430e0038";
  return webstr.str();
}

inline std::string build_nvidia_error_msg(cudaError_t e) {
#if CUDA_VERSION >= 10000 && CUDA_VERSION < 11000
  int32_t cuda_version = 100;
#elif CUDA_VERSION >= 9000
  int32_t cuda_version = 90;
#else
  int32_t cuda_version = -1;
#endif
  std::ostringstream sout;
  sout << " Cuda error(" << e << "), " << cudaGetErrorString(e) << ".";
  static platform::proto::cudaerrorDesc cudaerror;
  static bool _initSucceed = false;
  if (cudaerror.ByteSizeLong() == 0) {
    std::string filePath;
#if !defined(_WIN32)
    Dl_info info;
    if (dladdr(reinterpret_cast<void*>(GetCudaErrorWebsite), &info)) {
      std::string strModule(info.dli_fname);
      const size_t last_slash_idx = strModule.find_last_of("/");
      std::string compare_path = strModule.substr(strModule.length() - 6);
      if (std::string::npos != last_slash_idx) {
        strModule.erase(last_slash_idx, std::string::npos);
      }
      if (compare_path.compare("avx.so") == 0) {
        filePath = strModule +
                   "/../include/third_party/cudaerror/data/cudaErrorMessage.pb";
      } else {
        filePath =
            strModule + "/../../thirl_party/cudaerror/data/cudaErrorMessage.pb";
      }
    }
#else
    char buf[100];
    MEMORY_BASIC_INFORMATION mbi;
    HMODULE h_module =
        (::VirtualQuery(GetCudaErrorWebsite, &mbi, sizeof(mbi)) != 0)
            ? (HMODULE)mbi.AllocationBase
            : NULL;
    GetModuleFileName(h_module, buf, 100);
    std::string strModule(buf);
    const size_t last_slash_idx = strModule.find_last_of("\\");
    std::string compare_path = strModule.substr(strModule.length() - 7);
    if (std::string::npos != last_slash_idx) {
      strModule.erase(last_slash_idx, std::string::npos);
    }
    if (compare_path.compare("avx.pyd") == 0) {
      filePath =
          strModule +
          "\\..\\include\\third_party\\cudaerror\\data\\cudaErrorMessage.pb";
    } else {
      filePath =
          strModule + "\\..\\third_party\\cudaerror\\data\\cudaErrorMessage.pb";
    }
#endif
    std::ifstream fin(filePath, std::ios::in | std::ios::binary);
    _initSucceed = cudaerror.ParseFromIstream(&fin);
  }
  if (_initSucceed) {
    for (int i = 0; i < cudaerror.allmessages_size(); ++i) {
      if (cuda_version == cudaerror.allmessages(i).version()) {
        for (int j = 0; j < cudaerror.allmessages(i).messages_size(); ++j) {
          if (e == cudaerror.allmessages(i).messages(j).errorcode()) {
            sout << "\n  [Advise: "
                 << cudaerror.allmessages(i).messages(j).errormessage() << "]";
            return sout.str();
          }
        }
      }
    }
  }
  sout << "\n  [Advise: Please search for the error code(" << e
       << ") on website( " << GetCudaErrorWebsite(cuda_version)
       << " ) to get Nvidia's official solution about CUDA Error.]";
  return sout.str();
}

/***** CURAND ERROR *****/
inline bool is_error(curandStatus_t stat) {
  return stat != CURAND_STATUS_SUCCESS;
}

inline const char* curandGetErrorString(curandStatus_t stat) {
  switch (stat) {
    case CURAND_STATUS_SUCCESS:
      return "`CURAND_STATUS_SUCCESS`. No errors.";
    case CURAND_STATUS_VERSION_MISMATCH:
      return "`CURAND_STATUS_VERSION_MISMATCH`. Header file and linked library "
             "version do not match.";
    case CURAND_STATUS_NOT_INITIALIZED:
      return "`CURAND_STATUS_NOT_INITIALIZED`. Generator not initialized.";
    case CURAND_STATUS_ALLOCATION_FAILED:
      return "`CURAND_STATUS_ALLOCATION_FAILED`. Memory allocation failed.";
    case CURAND_STATUS_TYPE_ERROR:
      return "`CURAND_STATUS_TYPE_ERROR`. Generator is wrong type.";
    case CURAND_STATUS_OUT_OF_RANGE:
      return "`CURAND_STATUS_OUT_OF_RANGE`. Argument out of range.";
    case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
      return "`CURAND_STATUS_LENGTH_NOT_MULTIPLE`. Length requested is not a "
             "multple of dimension.";
    case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
      return "`CURAND_STATUS_DOUBLE_PRECISION_REQUIRED`. GPU does not have "
             "double precision required by MRG32k3a.";
    case CURAND_STATUS_LAUNCH_FAILURE:
      return "`CURAND_STATUS_LAUNCH_FAILURE`. Kernel launch failure.";
    case CURAND_STATUS_PREEXISTING_FAILURE:
      return "`CURAND_STATUS_PREEXISTING_FAILURE`. Preexisting failure on "
             "library entry.";
    case CURAND_STATUS_INITIALIZATION_FAILED:
      return "`CURAND_STATUS_INITIALIZATION_FAILED`. Initialization of CUDA "
             "failed.";
    case CURAND_STATUS_ARCH_MISMATCH:
      return "`CURAND_STATUS_ARCH_MISMATCH`. Architecture mismatch, GPU does "
             "not support requested feature.";
    case CURAND_STATUS_INTERNAL_ERROR:
      return "`CURAND_STATUS_INTERNAL_ERROR`. Internal library error.";
    default:
      return "Unknown curand status";
  }
}

inline std::string build_nvidia_error_msg(curandStatus_t stat) {
  std::string msg(" Curand error, ");
  return msg + curandGetErrorString(stat) + " ";
}

/***** CUDNN ERROR *****/
inline bool is_error(cudnnStatus_t stat) {
  return stat != CUDNN_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cudnnStatus_t stat) {
  std::string msg(" Cudnn error, ");
  return msg + platform::dynload::cudnnGetErrorString(stat) + " ";
}

/***** CUBLAS ERROR *****/
inline bool is_error(cublasStatus_t stat) {
  return stat != CUBLAS_STATUS_SUCCESS;
}

inline const char* cublasGetErrorString(cublasStatus_t stat) {
  switch (stat) {
    case CUBLAS_STATUS_NOT_INITIALIZED:
      return "`CUBLAS_STATUS_NOT_INITIALIZED`. The cuBLAS library was not "
             "initialized.";
    case CUBLAS_STATUS_ALLOC_FAILED:
      return "`CUBLAS_STATUS_ALLOC_FAILED`. Resource allocation failed inside "
             "the cuBLAS library.";
    case CUBLAS_STATUS_INVALID_VALUE:
      return "`CUBLAS_STATUS_INVALID_VALUE`. An unsupported value or parameter "
             "was passed to the function (a negative vector size, for "
             "example).";
    case CUBLAS_STATUS_ARCH_MISMATCH:
      return "`CUBLAS_STATUS_ARCH_MISMATCH`. The function requires a feature "
             "absent from the device architecture; usually caused by the lack "
             "of support for double precision.";
    case CUBLAS_STATUS_MAPPING_ERROR:
      return "`CUBLAS_STATUS_MAPPING_ERROR`. An access to GPU memory space "
             "failed, which is usually caused by a failure to bind a texture.";
    case CUBLAS_STATUS_EXECUTION_FAILED:
      return "`CUBLAS_STATUS_EXECUTION_FAILED`. The GPU program failed to "
             "execute. This is often caused by a launch failure of the kernel "
             "on the GPU, which can be caused by multiple reasons.";
    case CUBLAS_STATUS_INTERNAL_ERROR:
      return "`CUBLAS_STATUS_INTERNAL_ERROR`. An internal cuBLAS operation "
             "failed. This error is usually caused by a cudaMemcpyAsync() "
             "failure.";
    case CUBLAS_STATUS_NOT_SUPPORTED:
      return "`CUBLAS_STATUS_NOT_SUPPORTED`. The functionality requested is "
             "not supported.";
    case CUBLAS_STATUS_LICENSE_ERROR:
      return "`CUBLAS_STATUS_LICENSE_ERROR`. The functionality requested "
             "requires some license and an error was detected when trying to "
             "check the current licensing.";
    default:
      return "Unknown cublas status";
  }
}

inline std::string build_nvidia_error_msg(cublasStatus_t stat) {
  std::string msg(" Cublas error, ");
  return msg + cublasGetErrorString(stat) + " ";
}

/***** CUSOLVER ERROR *****/
inline bool is_error(cusolverStatus_t stat) {
  return stat != CUSOLVER_STATUS_SUCCESS;
}

inline const char* cusolverGetErrorString(cusolverStatus_t stat) {
  switch (stat) {
    case CUSOLVER_STATUS_NOT_INITIALIZED:
      return "`CUSOLVER_STATUS_NOT_INITIALIZED`. The cuSolver library was not "
             "initialized. This is usually caused by the lack of a prior call, "
             "an error in the CUDA Runtime API called by the cuSolver routine, "
             "or an error in the hardware setup.";
    case CUSOLVER_STATUS_ALLOC_FAILED:
      return "`CUSOLVER_STATUS_ALLOC_FAILED`. Resource allocation failed "
             "inside the cuSolver library. This is usually caused by a "
             "cudaMalloc() failure.";
    case CUSOLVER_STATUS_INVALID_VALUE:
      return "`CUSOLVER_STATUS_INVALID_VALUE`. An unsupported value or "
             "parameter was passed to the function (a negative vector size, "
             "for example).";
    case CUSOLVER_STATUS_ARCH_MISMATCH:
      return "`CUSOLVER_STATUS_ARCH_MISMATCH`. The function requires a feature "
             "absent from the device architecture; usually caused by the lack "
             "of support for atomic operations or double precision.";
    case CUSOLVER_STATUS_EXECUTION_FAILED:
      return "`CUSOLVER_STATUS_EXECUTION_FAILED`. The GPU program failed to "
             "execute. This is often caused by a launch failure of the kernel "
             "on the GPU, which can be caused by multiple reasons.";
    case CUSOLVER_STATUS_INTERNAL_ERROR:
      return "`CUSOLVER_STATUS_INTERNAL_ERROR`. An internal cuSolver operation "
             "failed. This error is usually caused by a cudaMemcpyAsync() "
             "failure.";
    case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
      return "`CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED`. The matrix type is "
             "not supported by this function. This is usually caused by "
             "passing an invalid matrix descriptor to the function.";
    default:
      return "Unknown cusolver status";
  }
}

inline std::string build_nvidia_error_msg(cusolverStatus_t stat) {
  std::string msg(" Cublas error, ");
  return msg + cusolverGetErrorString(stat) + " ";
}

/****** NCCL ERROR ******/
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
inline bool is_error(ncclResult_t nccl_result) {
  return nccl_result != ncclSuccess;
}

inline std::string build_nvidia_error_msg(ncclResult_t nccl_result) {
  std::string msg(" Nccl error, ");
  if (errno == ENOSPC || errno == EAGAIN) {
    std::string detail(strerror(errno));
    detail += "\nPlease try one of the following solutions:";
    detail += "\n1. export NCCL_SHM_DISABLE=1;";
    detail += "\n2. export NCCL_P2P_LEVEL=SYS;";
    detail +=
        "\n3. Increase shared memory by setting the -shm-size "
        "option when starting docker container, e.g., setting "
        " -shm-size=2g.\n";
    return msg + platform::dynload::ncclGetErrorString(nccl_result) +
           ", detail: " + detail + " ";
  }
  return msg + platform::dynload::ncclGetErrorString(nccl_result) + " ";
}
#endif  // not(__APPLE__) and PADDLE_WITH_NCCL

namespace details {

template <typename T>
struct CudaStatusType {};

#define DEFINE_CUDA_STATUS_TYPE(type, success_value) \
  template <>                                        \
  struct CudaStatusType<type> {                      \
    using Type = type;                               \
    static constexpr Type kSuccess = success_value;  \
  }

DEFINE_CUDA_STATUS_TYPE(cudaError_t, cudaSuccess);
DEFINE_CUDA_STATUS_TYPE(curandStatus_t, CURAND_STATUS_SUCCESS);
DEFINE_CUDA_STATUS_TYPE(cudnnStatus_t, CUDNN_STATUS_SUCCESS);
DEFINE_CUDA_STATUS_TYPE(cublasStatus_t, CUBLAS_STATUS_SUCCESS);
DEFINE_CUDA_STATUS_TYPE(cusolverStatus_t, CUSOLVER_STATUS_SUCCESS);

#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess);
#endif

}  // namespace details

#define PADDLE_ENFORCE_CUDA_SUCCESS(COND)                        \
  do {                                                           \
    auto __cond__ = (COND);                                      \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);             \
    constexpr auto __success_type__ =                            \
        ::paddle::platform::details::CudaStatusType<             \
            __CUDA_STATUS_TYPE__>::kSuccess;                     \
    if (UNLIKELY(__cond__ != __success_type__)) {                \
      auto __summary__ = ::paddle::platform::errors::External(   \
          ::paddle::platform::build_nvidia_error_msg(__cond__)); \
      __THROW_ERROR_INTERNAL__(__summary__);                     \
    }                                                            \
  } while (0)
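
// Illustrative usage (hypothetical buffers and stream): any CUDA status type
// registered via DEFINE_CUDA_STATUS_TYPE can be checked uniformly, e.g.
//   PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync(
//       dst, src, num_bytes, cudaMemcpyHostToDevice, stream));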

inline void retry_sleep(unsigned milliseconds) {
#ifdef _WIN32
  Sleep(milliseconds);
#else
  if (milliseconds < 1000) {
    // usleep argument must be less than 1,000,000. Reference:
    // https://pubs.opengroup.org/onlinepubs/7908799/xsh/usleep.html
    usleep(milliseconds * 1000);
  } else {
    // Sleep in whole seconds: usleep cannot take a value this large, and we
    // do not need exact millisecond precision for such long sleeps.
    sleep(milliseconds / 1000);
  }
#endif
}

#define PADDLE_RETRY_CUDA_SUCCESS(COND)                                 \
  do {                                                                  \
    auto __cond__ = (COND);                                             \
    int retry_count = 1;                                                \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);                    \
    constexpr auto __success_type__ =                                   \
        ::paddle::platform::details::CudaStatusType<                    \
            __CUDA_STATUS_TYPE__>::kSuccess;                            \
    while (UNLIKELY(__cond__ != __success_type__) && retry_count < 5) { \
      retry_sleep(FLAGS_gpu_allocator_retry_time);                      \
      __cond__ = (COND);                                                \
      ++retry_count;                                                    \
    }                                                                   \
    if (UNLIKELY(__cond__ != __success_type__)) {                       \
      auto __summary__ = ::paddle::platform::errors::External(          \
          ::paddle::platform::build_nvidia_error_msg(__cond__));        \
      __THROW_ERROR_INTERNAL__(__summary__);                            \
    }                                                                   \
  } while (0)
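
// Illustrative usage (hypothetical allocation): a transient failure such as
// cudaErrorMemoryAllocation is retried up to 4 times, sleeping
// FLAGS_gpu_allocator_retry_time milliseconds between attempts:
//   PADDLE_RETRY_CUDA_SUCCESS(cudaMalloc(&ptr, num_bytes));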

#undef DEFINE_CUDA_STATUS_TYPE
#endif  // PADDLE_WITH_CUDA

}  // namespace platform
}  // namespace paddle