未验证 提交 974b8a83 编写于 作者: C Chen Weihang 提交者: GitHub

Cherry-pick error type support for release1.6 (#21294)

* delete paddle infershape enforce marco (#20832)

* Polish and arrange code in enforce.h (#20901)

* Enrich the type of error and declare the error type interfaces (#21024)

* Enrich the type of error and declare the error type interfaces, test=develop

* adjust tests to adapt new form, test=develop

* add inference deps with error_codes.pb.h, test=develop

* restore stack iter start pos, test=develop

* polish code based review comments, test=develop

* Add dependency for error_codes.proto (#21084)

* fix activation_functions deps, test=develop, test=document_fix

* add error_codes_proto deps, test=develop, test=document_fix

* try delete enforce.h, test=develop, test=document_fix

* change cuda enforce & add example (#21142)
test=release/1.6
上级 7ab85396
...@@ -223,8 +223,8 @@ set(module "platform") ...@@ -223,8 +223,8 @@ set(module "platform")
set(platform_lib_deps profiler_proto) set(platform_lib_deps profiler_proto)
add_dependencies(fluid_lib_dist ${platform_lib_deps}) add_dependencies(fluid_lib_dist ${platform_lib_deps})
copy(fluid_lib_dist copy(fluid_lib_dist
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/dynload/*.h ${src_dir}/${module}/details/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/platform/profiler.pb.h SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/dynload/*.h ${src_dir}/${module}/details/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/platform/profiler.pb.h ${PADDLE_BINARY_DIR}/paddle/fluid/platform/error_codes.pb.h
DSTS ${dst_dir}/${module} ${dst_dir}/${module}/dynload ${dst_dir}/${module}/details ${dst_dir}/${module} DSTS ${dst_dir}/${module} ${dst_dir}/${module}/dynload ${dst_dir}/${module}/details ${dst_dir}/${module} ${dst_dir}/${module}
) )
set(module "string") set(module "string")
......
...@@ -39,8 +39,8 @@ TEST(Tensor, DataAssert) { ...@@ -39,8 +39,8 @@ TEST(Tensor, DataAssert) {
} catch (platform::EnforceNotMet& err) { } catch (platform::EnforceNotMet& err) {
caught = true; caught = true;
std::string ex_msg = err.what(); std::string ex_msg = err.what();
EXPECT_TRUE(ex_msg.find("holder_ should not be null\nTensor holds no " EXPECT_TRUE(ex_msg.find("holder_ should not be null") != std::string::npos);
"memory. Call " EXPECT_TRUE(ex_msg.find("Tensor holds no memory. Call "
"Tensor::mutable_data first.") != "Tensor::mutable_data first.") !=
std::string::npos); std::string::npos);
} }
...@@ -154,8 +154,9 @@ TEST(Tensor, ShareDataWith) { ...@@ -154,8 +154,9 @@ TEST(Tensor, ShareDataWith) {
} catch (paddle::platform::EnforceNotMet& err) { } catch (paddle::platform::EnforceNotMet& err) {
caught = true; caught = true;
std::string ex_msg = err.what(); std::string ex_msg = err.what();
EXPECT_TRUE(ex_msg.find("holder_ should not be null\nTensor holds no " EXPECT_TRUE(ex_msg.find("holder_ should not be null") !=
"memory. Call " std::string::npos);
EXPECT_TRUE(ex_msg.find("Tensor holds no memory. Call "
"Tensor::mutable_data first.") != "Tensor::mutable_data first.") !=
std::string::npos); std::string::npos);
} }
......
...@@ -81,7 +81,8 @@ class CUDADeviceContextAllocator : public Allocator { ...@@ -81,7 +81,8 @@ class CUDADeviceContextAllocator : public Allocator {
platform::CUDADeviceGuard guard(place_.device); platform::CUDADeviceGuard guard(place_.device);
PADDLE_ENFORCE_CUDA_SUCCESS( PADDLE_ENFORCE_CUDA_SUCCESS(
cudaEventCreate(&event_, cudaEventDisableTiming), cudaEventCreate(&event_, cudaEventDisableTiming),
"Create event failed in CUDADeviceContextAllocator"); platform::errors::External(
"Create event failed in CUDADeviceContextAllocator"));
} }
~CUDADeviceContextAllocator() { ~CUDADeviceContextAllocator() {
......
...@@ -184,31 +184,35 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { ...@@ -184,31 +184,35 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
true, true,
"The Input(Label) should be a 3-D tensor with last " "The Input(Label) should be a 3-D tensor with last "
"dimension fixed to 1 or a 2-D tensor in padding mode."); "dimension fixed to 1 or a 2-D tensor in padding mode.");
PADDLE_INFERSHAPE_ENFORCE_EQ( if (ctx->IsRuntime()) {
ctx, emission_dims[0], label_dims[0], PADDLE_ENFORCE_EQ(emission_dims[0], label_dims[0],
"The batch size of Input(Emission) and Input(Label) " "The batch size of Input(Emission) and Input(Label) "
"should be the same."); "should be the same.");
PADDLE_INFERSHAPE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(emission_dims[1], label_dims[1],
ctx, emission_dims[1], label_dims[1],
"The max length of Input(Emission) and Input(Label) " "The max length of Input(Emission) and Input(Label) "
"should be the same."); "should be the same.");
}
} else { } else {
PADDLE_ENFORCE_EQ(emission_dims.size(), 2, PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
"The Input(Emission) should be a 2-D tensor."); "The Input(Emission) should be a 2-D tensor.");
PADDLE_INFERSHAPE_ENFORCE_EQ( if (ctx->IsRuntime()) {
ctx, emission_dims[1], transition_dims[1], PADDLE_ENFORCE_EQ(emission_dims[1], transition_dims[1],
"The 2nd dimension of the Input(Emission) and the Input(Transition) " "The 2nd dimension of the Input(Emission) and the "
"Input(Transition) "
"should be equal to the tag number."); "should be equal to the tag number.");
}
auto label_dims = ctx->GetInputDim("Label"); auto label_dims = ctx->GetInputDim("Label");
PADDLE_ENFORCE_EQ(label_dims.size(), 2, PADDLE_ENFORCE_EQ(label_dims.size(), 2,
"The Input(Label) should be a 2-D tensor with the 2nd " "The Input(Label) should be a 2-D tensor with the 2nd "
"dimensions fixed to 1."); "dimensions fixed to 1.");
PADDLE_INFERSHAPE_ENFORCE_EQ( if (ctx->IsRuntime()) {
ctx, emission_dims[0], label_dims[0], PADDLE_ENFORCE_EQ(
emission_dims[0], label_dims[0],
"The height of Input(Emission) and the height of Input(Label) " "The height of Input(Emission) and the height of Input(Label) "
"should be the same."); "should be the same.");
} }
}
ctx->SetOutputDim("Alpha", emission_dims); ctx->SetOutputDim("Alpha", emission_dims);
ctx->SetOutputDim("EmissionExps", emission_dims); ctx->SetOutputDim("EmissionExps", emission_dims);
ctx->SetOutputDim("TransitionExps", transition_dims); ctx->SetOutputDim("TransitionExps", transition_dims);
......
...@@ -14,9 +14,9 @@ limitations under the License. */ ...@@ -14,9 +14,9 @@ limitations under the License. */
#pragma once #pragma once
#include <math.h> #include <math.h>
#include <stdexcept>
#include <string> #include <string>
#include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/hostdevice.h"
namespace paddle { namespace paddle {
...@@ -45,7 +45,7 @@ inline ActivationType GetActivationType(const std::string &type) { ...@@ -45,7 +45,7 @@ inline ActivationType GetActivationType(const std::string &type) {
} else if (type == "identity" || type == "") { } else if (type == "identity" || type == "") {
return ActivationType::kIdentity; return ActivationType::kIdentity;
} }
PADDLE_THROW("Not support type %s.", type); throw std::invalid_argument("The input type is not supported");
} }
namespace forward { namespace forward {
......
...@@ -45,19 +45,20 @@ class AccuracyOp : public framework::OperatorWithKernel { ...@@ -45,19 +45,20 @@ class AccuracyOp : public framework::OperatorWithKernel {
"ShapeError: label's dimensions of AccuracyOp must be 2. " "ShapeError: label's dimensions of AccuracyOp must be 2. "
"But received label's dimensions = %d, label's shape = [%s]", "But received label's dimensions = %d, label's shape = [%s]",
label_dim.size(), label_dim); label_dim.size(), label_dim);
PADDLE_INFERSHAPE_ENFORCE_EQ( if (ctx->IsRuntime()) {
ctx, label_dim[1], 1, PADDLE_ENFORCE_EQ(label_dim[1], 1,
"ShapeError: label's second dimension of " "ShapeError: label's second dimension of "
"AccuracyOp must be 1. But received label's " "AccuracyOp must be 1. But received label's "
"second dimension is = %d, label's shape = [%s]", "second dimension is = %d, label's shape = [%s]",
label_dim[1], label_dim); label_dim[1], label_dim);
PADDLE_INFERSHAPE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
ctx, inference_dim[0], label_dim[0], inference_dim[0], label_dim[0],
"ShapeError: the output's num_rows of AccuracyOp must be" "ShapeError: the output's num_rows of AccuracyOp must be"
" the same as label's num_rows. But received output's " " the same as label's num_rows. But received output's "
"shape = [%s], label's shape = [%s], output's num_rows = %d, label's " "shape = [%s], label's shape = [%s], output's num_rows = %d, label's "
"num_rows = %d", "num_rows = %d",
inference_dim, label_dim, inference_dim[0], label_dim[0]); inference_dim, label_dim, inference_dim[0], label_dim[0]);
}
ctx->SetOutputDim("Accuracy", {1}); ctx->SetOutputDim("Accuracy", {1});
ctx->SetOutputDim("Correct", {1}); ctx->SetOutputDim("Correct", {1});
......
...@@ -28,14 +28,18 @@ class AucOp : public framework::OperatorWithKernel { ...@@ -28,14 +28,18 @@ class AucOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE(ctx->HasInput("Label"), PADDLE_ENFORCE(ctx->HasInput("Label"),
"Input of Label should not be null."); "Input of Label should not be null.");
auto predict_width = ctx->GetInputDim("Predict")[1]; auto predict_width = ctx->GetInputDim("Predict")[1];
PADDLE_INFERSHAPE_ENFORCE_LE(ctx, predict_width, 2, if (ctx->IsRuntime()) {
PADDLE_ENFORCE_LE(predict_width, 2,
"Only support binary classification," "Only support binary classification,"
"prediction dims[1] should be 1 or 2"); "prediction dims[1] should be 1 or 2");
}
auto predict_height = ctx->GetInputDim("Predict")[0]; auto predict_height = ctx->GetInputDim("Predict")[0];
auto label_height = ctx->GetInputDim("Label")[0]; auto label_height = ctx->GetInputDim("Label")[0];
PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, predict_height, label_height, if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(predict_height, label_height,
"Out and Label should have same height."); "Out and Label should have same height.");
}
int num_pred_buckets = ctx->Attrs().Get<int>("num_thresholds") + 1; int num_pred_buckets = ctx->Attrs().Get<int>("num_thresholds") + 1;
int slide_steps = ctx->Attrs().Get<int>("slide_steps"); int slide_steps = ctx->Attrs().Get<int>("slide_steps");
......
...@@ -48,32 +48,41 @@ class MulOp : public framework::OperatorWithKernel { ...@@ -48,32 +48,41 @@ class MulOp : public framework::OperatorWithKernel {
<< " y_num_col_dims=" << y_num_col_dims; << " y_num_col_dims=" << y_num_col_dims;
PADDLE_ENFORCE_NE(framework::product(y_dims), 0, PADDLE_ENFORCE_NE(framework::product(y_dims), 0,
platform::errors::PreconditionNotMet(
"Maybe the Input variable Y(%s) has not " "Maybe the Input variable Y(%s) has not "
"been initialized. You may need to confirm " "been initialized. You may need to confirm "
"if you put exe.run(startup_program) " "if you put exe.run(startup_program) "
"after optimizer.minimize function.", "after optimizer.minimize function.",
ctx->Inputs("Y").front()); ctx->Inputs("Y").front()));
PADDLE_ENFORCE_GT(x_dims.size(), x_num_col_dims, PADDLE_ENFORCE_GT(
"ShapeError: The input tensor X's dimensions of MulOp " x_dims.size(), x_num_col_dims,
platform::errors::InvalidArgument(
"The input tensor X's dimensions of MulOp "
"should be larger than x_num_col_dims. But received X's " "should be larger than x_num_col_dims. But received X's "
"dimensions = %d, X's shape = [%s], x_num_col_dims = %d.", "dimensions = %d, X's shape = [%s], x_num_col_dims = %d.",
x_dims.size(), x_dims, x_num_col_dims); x_dims.size(), x_dims, x_num_col_dims));
PADDLE_ENFORCE_GT(y_dims.size(), y_num_col_dims, PADDLE_ENFORCE_GT(
"ShapeError: The input tensor Y's dimensions of MulOp " y_dims.size(), y_num_col_dims,
platform::errors::InvalidArgument(
"The input tensor Y's dimensions of MulOp "
"should be larger than y_num_col_dims. But received Y's " "should be larger than y_num_col_dims. But received Y's "
"dimensions = %d, Y's shape = [%s], y_num_col_dims = %d.", "dimensions = %d, Y's shape = [%s], y_num_col_dims = %d.",
y_dims.size(), y_dims, y_num_col_dims); y_dims.size(), y_dims, y_num_col_dims));
auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims); auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims); auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
x_mat_dims[1], y_mat_dims[0], x_mat_dims[1], y_mat_dims[0],
"ShapeError: After flatten the input tensor X and Y to 2-D dimensions " platform::errors::InvalidArgument(
"After flatten the input tensor X and Y to 2-D dimensions "
"matrix X1 and Y1, the matrix X1's width must be equal with matrix " "matrix X1 and Y1, the matrix X1's width must be equal with matrix "
"Y1's height. But received X's shape = [%s], X1's shape = [%s], X1's " "Y1's height. But received X's shape = [%s], X1's shape = [%s], "
"width = %s; Y's shape = [%s], Y1's shape = [%s], Y1's height = %s.", "X1's "
x_dims, x_mat_dims, x_mat_dims[1], y_dims, y_mat_dims, y_mat_dims[0]); "width = %s; Y's shape = [%s], Y1's shape = [%s], Y1's height = "
"%s.",
x_dims, x_mat_dims, x_mat_dims[1], y_dims, y_mat_dims,
y_mat_dims[0]));
std::vector<int64_t> output_dims; std::vector<int64_t> output_dims;
output_dims.reserve( output_dims.reserve(
static_cast<size_t>(x_num_col_dims + y_dims.size() - y_num_col_dims)); static_cast<size_t>(x_num_col_dims + y_dims.size() - y_num_col_dims));
......
...@@ -135,11 +135,13 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { ...@@ -135,11 +135,13 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_GE(out_dims.size(), 2, PADDLE_ENFORCE_GE(out_dims.size(), 2,
"The tensor rank of Input(Out@Grad) should be 2."); "The tensor rank of Input(Out@Grad) should be 2.");
PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[0], in_dims[0], if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(out_dims[0], in_dims[0],
"The 1st dimension of Input(Out@Grad) must be " "The 1st dimension of Input(Out@Grad) must be "
"same as input."); "same as input.");
PADDLE_INFERSHAPE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(out_dims[1], 1,
ctx, out_dims[1], 1, "The 2nd dimension of Input(Out@Grad) must be 1."); "The 2nd dimension of Input(Out@Grad) must be 1.");
}
auto x_grad_name = framework::GradVarName("X"); auto x_grad_name = framework::GradVarName("X");
auto y_grad_name = framework::GradVarName("Y"); auto y_grad_name = framework::GradVarName("Y");
......
...@@ -137,12 +137,14 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { ...@@ -137,12 +137,14 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel {
auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
auto x_dims = ctx->GetInputDim("X"); auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y"); auto y_dims = ctx->GetInputDim("Y");
PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[0], x_dims[0], if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0],
"First dimension of output gradient and " "First dimension of output gradient and "
"input value must be equal."); "input value must be equal.");
PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[1], 1, PADDLE_ENFORCE_EQ(out_dims[1], 1,
"Second dimension of output gradient " "Second dimension of output gradient "
"must be 1."); "must be 1.");
}
auto x_grad_name = framework::GradVarName("X"); auto x_grad_name = framework::GradVarName("X");
auto y_grad_name = framework::GradVarName("Y"); auto y_grad_name = framework::GradVarName("Y");
if (ctx->HasOutput(x_grad_name)) ctx->SetOutputDim(x_grad_name, x_dims); if (ctx->HasOutput(x_grad_name)) ctx->SetOutputDim(x_grad_name, x_dims);
......
proto_library(profiler_proto SRCS profiler.proto DEPS framework_proto simple_threadpool) proto_library(profiler_proto SRCS profiler.proto DEPS framework_proto simple_threadpool)
py_proto_compile(profiler_py_proto SRCS profiler.proto) py_proto_compile(profiler_py_proto SRCS profiler.proto)
proto_library(error_codes_proto SRCS error_codes.proto)
add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
add_dependencies(profiler_py_proto profiler_py_proto_init) add_dependencies(profiler_py_proto profiler_py_proto_init)
if (NOT WIN32) if (NOT WIN32)
...@@ -22,10 +22,13 @@ endif(NOT WIN32) ...@@ -22,10 +22,13 @@ endif(NOT WIN32)
cc_library(flags SRCS flags.cc DEPS gflags) cc_library(flags SRCS flags.cc DEPS gflags)
cc_library(errors SRCS errors.cc DEPS error_codes_proto)
cc_test(errors_test SRCS errors_test.cc DEPS errors enforce)
if(WITH_GPU) if(WITH_GPU)
nv_library(enforce SRCS enforce.cc DEPS flags) nv_library(enforce SRCS enforce.cc DEPS flags errors)
else() else()
cc_library(enforce SRCS enforce.cc DEPS flags) cc_library(enforce SRCS enforce.cc DEPS flags errors)
endif() endif()
cc_test(enforce_test SRCS enforce_test.cc DEPS stringpiece enforce) cc_test(enforce_test SRCS enforce_test.cc DEPS stringpiece enforce)
......
...@@ -38,6 +38,7 @@ limitations under the License. */ ...@@ -38,6 +38,7 @@ limitations under the License. */
#define GLOG_NO_ABBREVIATED_SEVERITIES // msvc conflict logging with windows.h #define GLOG_NO_ABBREVIATED_SEVERITIES // msvc conflict logging with windows.h
#include "glog/logging.h" #include "glog/logging.h"
#include "paddle/fluid/platform/errors.h"
#include "paddle/fluid/platform/macros.h" #include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/port.h" #include "paddle/fluid/platform/port.h"
#include "paddle/fluid/string/printf.h" #include "paddle/fluid/string/printf.h"
...@@ -52,11 +53,30 @@ limitations under the License. */ ...@@ -52,11 +53,30 @@ limitations under the License. */
#endif // __APPLE__ #endif // __APPLE__
#endif // PADDLE_WITH_CUDA #endif // PADDLE_WITH_CUDA
#define WITH_SIMPLE_TRACEBACK
namespace paddle { namespace paddle {
namespace platform { namespace platform {
/** HELPER MACROS AND FUNCTIONS **/
// Because most enforce conditions would evaluate to true, we can use
// __builtin_expect to instruct the C++ compiler to generate code that
// always forces branch prediction of true.
// This generates faster binary code. __builtin_expect is since C++11.
// For more details, please check https://stackoverflow.com/a/43870188/724872.
#if !defined(_WIN32)
#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)
#else
// there is no equivalent intrinsics in msvc.
#define UNLIKELY(condition) (condition)
#endif
#if !defined(_WIN32)
#define LIKELY(condition) __builtin_expect(static_cast<bool>(condition), 1)
#else
// there is no equivalent intrinsics in msvc.
#define LIKELY(condition) (condition)
#endif
#ifdef __GNUC__ #ifdef __GNUC__
inline std::string demangle(std::string name) { inline std::string demangle(std::string name) {
int status = -4; // some arbitrary value to eliminate the compiler warning int status = -4; // some arbitrary value to eliminate the compiler warning
...@@ -68,6 +88,82 @@ inline std::string demangle(std::string name) { ...@@ -68,6 +88,82 @@ inline std::string demangle(std::string name) {
inline std::string demangle(std::string name) { return name; } inline std::string demangle(std::string name) { return name; }
#endif #endif
namespace details {
template <typename T>
inline constexpr bool IsArithmetic() {
return std::is_arithmetic<T>::value;
}
template <typename T1, typename T2, bool kIsArithmetic /* = true */>
struct TypeConverterImpl {
using Type1 = typename std::common_type<T1, T2>::type;
using Type2 = Type1;
};
template <typename T1, typename T2>
struct TypeConverterImpl<T1, T2, false> {
using Type1 = T1;
using Type2 = T2;
};
template <typename T1, typename T2>
struct TypeConverter {
private:
static constexpr bool kIsArithmetic =
IsArithmetic<T1>() && IsArithmetic<T2>();
public:
using Type1 = typename TypeConverterImpl<T1, T2, kIsArithmetic>::Type1;
using Type2 = typename TypeConverterImpl<T1, T2, kIsArithmetic>::Type2;
};
template <typename T1, typename T2>
using CommonType1 = typename std::add_lvalue_reference<
typename std::add_const<typename TypeConverter<T1, T2>::Type1>::type>::type;
template <typename T1, typename T2>
using CommonType2 = typename std::add_lvalue_reference<
typename std::add_const<typename TypeConverter<T1, T2>::Type2>::type>::type;
// Here, we use SFINAE to check whether T can be converted to std::string
template <typename T>
struct CanToString {
private:
using YesType = uint8_t;
using NoType = uint16_t;
template <typename U>
static YesType Check(decltype(std::cout << std::declval<U>())) {
return 0;
}
template <typename U>
static NoType Check(...) {
return 0;
}
public:
static constexpr bool kValue =
std::is_same<YesType, decltype(Check<T>(std::cout))>::value;
};
template <bool kCanToString /* = true */>
struct BinaryCompareMessageConverter {
template <typename T>
static std::string Convert(const char* expression, const T& value) {
return expression + std::string(":") + string::to_string(value);
}
};
template <>
struct BinaryCompareMessageConverter<false> {
template <typename T>
static const char* Convert(const char* expression, const T& value) {
return expression;
}
};
} // namespace details
template <typename StrType> template <typename StrType>
inline std::string GetTraceBackString(StrType&& what, const char* file, inline std::string GetTraceBackString(StrType&& what, const char* file,
int line) { int line) {
...@@ -86,21 +182,11 @@ inline std::string GetTraceBackString(StrType&& what, const char* file, ...@@ -86,21 +182,11 @@ inline std::string GetTraceBackString(StrType&& what, const char* file,
for (int i = 0; i < size; ++i) { for (int i = 0; i < size; ++i) {
if (dladdr(call_stack[i], &info) && info.dli_sname) { if (dladdr(call_stack[i], &info) && info.dli_sname) {
auto demangled = demangle(info.dli_sname); auto demangled = demangle(info.dli_sname);
#ifdef WITH_SIMPLE_TRACEBACK
std::string path(info.dli_fname); std::string path(info.dli_fname);
// C++ traceback info are from core.so // C++ traceback info are from core.so
if (path.substr(path.length() - 3).compare(".so") == 0) { if (path.substr(path.length() - 3).compare(".so") == 0) {
sout << string::Sprintf("%-3d %s\n", idx++, demangled); sout << string::Sprintf("%-3d %s\n", idx++, demangled);
} }
#else
auto addr_offset = static_cast<char*>(call_stack[i]) -
static_cast<char*>(info.dli_saddr);
sout << string::Sprintf("%-3d %*0p %s + %zd\n", i, 2 + sizeof(void*) * 2,
call_stack[i], demangled, addr_offset);
} else {
sout << string::Sprintf("%-3d %*0p\n", i, 2 + sizeof(void*) * 2,
call_stack[i]);
#endif
} }
} }
free(symbols); free(symbols);
...@@ -109,14 +195,33 @@ inline std::string GetTraceBackString(StrType&& what, const char* file, ...@@ -109,14 +195,33 @@ inline std::string GetTraceBackString(StrType&& what, const char* file,
#endif #endif
sout << "\n----------------------\nError Message " sout << "\n----------------------\nError Message "
"Summary:\n----------------------\n"; "Summary:\n----------------------\n";
sout << string::Sprintf("PaddleCheckError: %s at [%s:%d]", sout << string::Sprintf("%s at (%s:%d)", std::forward<StrType>(what), file,
std::forward<StrType>(what), file, line) line)
<< std::endl; << std::endl;
return sout.str(); return sout.str();
} }
inline bool is_error(bool stat) { return !stat; }
inline void throw_on_error(bool stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
}
inline void throw_on_error(const platform::ErrorSummary& error) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(error.ToString());
#else
LOG(FATAL) << error.ToString();
#endif
}
/** ENFORCE EXCEPTION AND MACROS **/
struct EnforceNotMet : public std::exception { struct EnforceNotMet : public std::exception {
std::string err_str_;
EnforceNotMet(std::exception_ptr e, const char* file, int line) { EnforceNotMet(std::exception_ptr e, const char* file, int line) {
try { try {
std::rethrow_exception(e); std::rethrow_exception(e);
...@@ -128,9 +233,118 @@ struct EnforceNotMet : public std::exception { ...@@ -128,9 +233,118 @@ struct EnforceNotMet : public std::exception {
EnforceNotMet(const std::string& str, const char* file, int line) EnforceNotMet(const std::string& str, const char* file, int line)
: err_str_(GetTraceBackString(str, file, line)) {} : err_str_(GetTraceBackString(str, file, line)) {}
EnforceNotMet(const platform::ErrorSummary& error, const char* file, int line)
: err_str_(GetTraceBackString(error.ToString(), file, line)) {}
const char* what() const noexcept override { return err_str_.c_str(); } const char* what() const noexcept override { return err_str_.c_str(); }
std::string err_str_;
}; };
#define PADDLE_THROW(...) \
do { \
throw ::paddle::platform::EnforceNotMet( \
::paddle::platform::ErrorSummary(__VA_ARGS__), __FILE__, __LINE__); \
} while (0)
#define PADDLE_THROW_ERROR(...) \
do { \
throw ::paddle::platform::EnforceNotMet( \
::paddle::string::Sprintf(__VA_ARGS__), __FILE__, __LINE__); \
} while (0)
#if defined(__CUDA_ARCH__)
// For cuda, the assertions can affect performance and it is therefore
// recommended to disable them in production code
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#assertion
#define PADDLE_ENFORCE(_IS_NOT_ERROR, __FORMAT, ...) \
do { \
if (!(_IS_NOT_ERROR)) { \
printf("Error: %s:%d Assertion `%s` failed. " __FORMAT "\n", __FILE__, \
__LINE__, #_IS_NOT_ERROR, ##__VA_ARGS__); \
asm("trap;"); \
} \
} while (0)
#else
#define PADDLE_ENFORCE(COND, ...) \
do { \
auto __cond__ = (COND); \
if (UNLIKELY(::paddle::platform::is_error(__cond__))) { \
try { \
::paddle::platform::throw_on_error( \
::paddle::platform::ErrorSummary(__VA_ARGS__)); \
} catch (...) { \
throw ::paddle::platform::EnforceNotMet(std::current_exception(), \
__FILE__, __LINE__); \
} \
} \
} while (0)
#endif
/*
* Some enforce helpers here, usage:
* int a = 1;
* int b = 2;
* PADDLE_ENFORCE_EQ(a, b);
*
* will raise an expression described as follows:
* "Expected input a == b, but received a(1) != b(2)."
* with detailed stack information.
*
* extra messages is also supported, for example:
* PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2)
*/
#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...) \
do { \
if (UNLIKELY(nullptr == (__VAL))) { \
PADDLE_THROW_ERROR( \
"%s\n [Hint: " #__VAL " should not be null.]", \
::paddle::platform::ErrorSummary(__VA_ARGS__).ToString()); \
} \
} while (0)
#define __PADDLE_BINARY_COMPARE(__VAL1, __VAL2, __CMP, __INV_CMP, ...) \
do { \
auto __val1 = (__VAL1); \
auto __val2 = (__VAL2); \
using __TYPE1__ = decltype(__val1); \
using __TYPE2__ = decltype(__val2); \
using __COMMON_TYPE1__ = \
::paddle::platform::details::CommonType1<__TYPE1__, __TYPE2__>; \
using __COMMON_TYPE2__ = \
::paddle::platform::details::CommonType2<__TYPE1__, __TYPE2__>; \
bool __is_not_error = (static_cast<__COMMON_TYPE1__>(__val1))__CMP( \
static_cast<__COMMON_TYPE2__>(__val2)); \
if (UNLIKELY(!__is_not_error)) { \
constexpr bool __kCanToString__ = \
::paddle::platform::details::CanToString<__TYPE1__>::kValue && \
::paddle::platform::details::CanToString<__TYPE2__>::kValue; \
PADDLE_THROW_ERROR( \
"%s\n [Hint: Expected %s " #__CMP \
" %s, but received %s " #__INV_CMP " %s.]", \
::paddle::platform::ErrorSummary(__VA_ARGS__).ToString(), #__VAL1, \
#__VAL2, ::paddle::platform::details::BinaryCompareMessageConverter< \
__kCanToString__>::Convert(#__VAL1, __val1), \
::paddle::platform::details::BinaryCompareMessageConverter< \
__kCanToString__>::Convert(#__VAL2, __val2)); \
} \
} while (0)
#define PADDLE_ENFORCE_EQ(__VAL0, __VAL1, ...) \
__PADDLE_BINARY_COMPARE(__VAL0, __VAL1, ==, !=, __VA_ARGS__)
#define PADDLE_ENFORCE_NE(__VAL0, __VAL1, ...) \
__PADDLE_BINARY_COMPARE(__VAL0, __VAL1, !=, ==, __VA_ARGS__)
#define PADDLE_ENFORCE_GT(__VAL0, __VAL1, ...) \
__PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >, <=, __VA_ARGS__)
#define PADDLE_ENFORCE_GE(__VAL0, __VAL1, ...) \
__PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >=, <, __VA_ARGS__)
#define PADDLE_ENFORCE_LT(__VAL0, __VAL1, ...) \
__PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__)
#define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \
__PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__)
/** OTHER EXCEPTION AND ENFORCE **/
struct EOFException : public std::exception { struct EOFException : public std::exception {
std::string err_str_; std::string err_str_;
EOFException(const char* err_msg, const char* file, int line) { EOFException(const char* err_msg, const char* file, int line) {
...@@ -140,39 +354,28 @@ struct EOFException : public std::exception { ...@@ -140,39 +354,28 @@ struct EOFException : public std::exception {
const char* what() const noexcept override { return err_str_.c_str(); } const char* what() const noexcept override { return err_str_.c_str(); }
}; };
// Because most enforce conditions would evaluate to true, we can use #define PADDLE_THROW_EOF() \
// __builtin_expect to instruct the C++ compiler to generate code that do { \
// always forces branch prediction of true. throw ::paddle::platform::EOFException("There is no next data.", __FILE__, \
// This generates faster binary code. __builtin_expect is since C++11. __LINE__); \
// For more details, please check https://stackoverflow.com/a/43870188/724872. } while (0)
#if !defined(_WIN32)
#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)
#else
// there is no equivalent intrinsics in msvc.
#define UNLIKELY(condition) (condition)
#endif
#if !defined(_WIN32)
#define LIKELY(condition) __builtin_expect(static_cast<bool>(condition), 1)
#else
// there is no equivalent intrinsics in msvc.
#define LIKELY(condition) (condition)
#endif
inline bool is_error(bool stat) { return !stat; } #define PADDLE_THROW_BAD_ALLOC(...) \
do { \
throw ::paddle::memory::allocation::BadAlloc( \
::paddle::string::Sprintf(__VA_ARGS__), __FILE__, __LINE__); \
} while (0)
inline void throw_on_error(bool stat, const std::string& msg) { /** CUDA PADDLE ENFORCE FUNCTIONS AND MACROS **/
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
}
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
inline bool is_error(cudaError_t e) { return e != cudaSuccess; } inline bool is_error(cudaError_t e) { return e != cudaSuccess; }
inline std::string build_ex_string(cudaError_t e, const std::string& msg) {
return msg;
}
inline void throw_on_error(cudaError_t e, const std::string& msg) { inline void throw_on_error(cudaError_t e, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG #ifndef REPLACE_ENFORCE_GLOG
throw thrust::system_error(e, thrust::cuda_category(), msg); throw thrust::system_error(e, thrust::cuda_category(), msg);
...@@ -185,6 +388,11 @@ inline bool is_error(curandStatus_t stat) { ...@@ -185,6 +388,11 @@ inline bool is_error(curandStatus_t stat) {
return stat != CURAND_STATUS_SUCCESS; return stat != CURAND_STATUS_SUCCESS;
} }
inline std::string build_ex_string(curandStatus_t stat,
const std::string& msg) {
return msg;
}
inline void throw_on_error(curandStatus_t stat, const std::string& msg) { inline void throw_on_error(curandStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG #ifndef REPLACE_ENFORCE_GLOG
throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(), throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(),
...@@ -198,11 +406,15 @@ inline bool is_error(cudnnStatus_t stat) { ...@@ -198,11 +406,15 @@ inline bool is_error(cudnnStatus_t stat) {
return stat != CUDNN_STATUS_SUCCESS; return stat != CUDNN_STATUS_SUCCESS;
} }
inline std::string build_ex_string(cudnnStatus_t stat, const std::string& msg) {
return msg + "\n [" + platform::dynload::cudnnGetErrorString(stat) + "]";
}
inline void throw_on_error(cudnnStatus_t stat, const std::string& msg) { inline void throw_on_error(cudnnStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG #ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(platform::dynload::cudnnGetErrorString(stat) + msg); throw std::runtime_error(msg);
#else #else
LOG(FATAL) << platform::dynload::cudnnGetErrorString(stat) << msg; LOG(FATAL) << msg;
#endif #endif
} }
...@@ -210,31 +422,36 @@ inline bool is_error(cublasStatus_t stat) { ...@@ -210,31 +422,36 @@ inline bool is_error(cublasStatus_t stat) {
return stat != CUBLAS_STATUS_SUCCESS; return stat != CUBLAS_STATUS_SUCCESS;
} }
inline void throw_on_error(cublasStatus_t stat, const std::string& msg) { inline std::string build_ex_string(cublasStatus_t stat,
const std::string& msg) {
std::string err; std::string err;
if (stat == CUBLAS_STATUS_NOT_INITIALIZED) { if (stat == CUBLAS_STATUS_NOT_INITIALIZED) {
err = "CUBLAS: not initialized, "; err = "CUBLAS: not initialized.";
} else if (stat == CUBLAS_STATUS_ALLOC_FAILED) { } else if (stat == CUBLAS_STATUS_ALLOC_FAILED) {
err = "CUBLAS: alloc failed, "; err = "CUBLAS: alloc failed.";
} else if (stat == CUBLAS_STATUS_INVALID_VALUE) { } else if (stat == CUBLAS_STATUS_INVALID_VALUE) {
err = "CUBLAS: invalid value, "; err = "CUBLAS: invalid value.";
} else if (stat == CUBLAS_STATUS_ARCH_MISMATCH) { } else if (stat == CUBLAS_STATUS_ARCH_MISMATCH) {
err = "CUBLAS: arch mismatch, "; err = "CUBLAS: arch mismatch.";
} else if (stat == CUBLAS_STATUS_MAPPING_ERROR) { } else if (stat == CUBLAS_STATUS_MAPPING_ERROR) {
err = "CUBLAS: mapping error, "; err = "CUBLAS: mapping error.";
} else if (stat == CUBLAS_STATUS_EXECUTION_FAILED) { } else if (stat == CUBLAS_STATUS_EXECUTION_FAILED) {
err = "CUBLAS: execution failed, "; err = "CUBLAS: execution failed.";
} else if (stat == CUBLAS_STATUS_INTERNAL_ERROR) { } else if (stat == CUBLAS_STATUS_INTERNAL_ERROR) {
err = "CUBLAS: internal error, "; err = "CUBLAS: internal error.";
} else if (stat == CUBLAS_STATUS_NOT_SUPPORTED) { } else if (stat == CUBLAS_STATUS_NOT_SUPPORTED) {
err = "CUBLAS: not supported, "; err = "CUBLAS: not supported, ";
} else if (stat == CUBLAS_STATUS_LICENSE_ERROR) { } else if (stat == CUBLAS_STATUS_LICENSE_ERROR) {
err = "CUBLAS: license error, "; err = "CUBLAS: license error.";
} }
return msg + "\n [" + err + "]";
}
inline void throw_on_error(cublasStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG #ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(err + msg); throw std::runtime_error(msg);
#else #else
LOG(FATAL) << err << msg; LOG(FATAL) << msg;
#endif #endif
} }
...@@ -243,14 +460,21 @@ inline bool is_error(ncclResult_t nccl_result) { ...@@ -243,14 +460,21 @@ inline bool is_error(ncclResult_t nccl_result) {
return nccl_result != ncclSuccess; return nccl_result != ncclSuccess;
} }
inline void throw_on_error(ncclResult_t stat, const std::string& msg) { inline std::string build_ex_string(ncclResult_t nccl_result,
const std::string& msg) {
return msg + "\n [" + platform::dynload::ncclGetErrorString(nccl_result) +
"]";
}
inline void throw_on_error(ncclResult_t nccl_result, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG #ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(platform::dynload::ncclGetErrorString(stat) + msg); throw std::runtime_error(msg);
#else #else
LOG(FATAL) << platform::dynload::ncclGetErrorString(stat) << msg; LOG(FATAL) << msg;
#endif #endif
} }
#endif // __APPLE__ and windows #endif // __APPLE__ and windows
#endif // PADDLE_WITH_CUDA #endif // PADDLE_WITH_CUDA
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
...@@ -276,41 +500,7 @@ DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess); ...@@ -276,41 +500,7 @@ DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess);
#endif #endif
} // namespace details } // namespace details
#endif #endif // PADDLE_WITH_CUDA
// Unconditionally throws EnforceNotMet, capturing the throw site's file and
// line. The variadic arguments are formatted via paddle::string::Sprintf.
#define PADDLE_THROW(...) \
  do { \
    throw ::paddle::platform::EnforceNotMet( \
        ::paddle::string::Sprintf(__VA_ARGS__), __FILE__, __LINE__); \
  } while (0)
#if defined(__CUDA_ARCH__)
// For cuda, the assertions can affect performance and it is therefore
// recommended to disable them in production code
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#assertion
// Device-side variant: exceptions are unavailable in device code, so print
// the failure and halt the kernel with a trap instruction instead.
#define PADDLE_ENFORCE(_IS_NOT_ERROR, __FORMAT, ...) \
  do { \
    if (!(_IS_NOT_ERROR)) { \
      printf("Exception: %s:%d Assertion `%s` failed. " __FORMAT "\n", \
             __FILE__, __LINE__, #_IS_NOT_ERROR, ##__VA_ARGS__); \
      asm("trap;"); \
    } \
  } while (0)
#else
// Host-side variant: evaluates COND once, and when is_error(COND) holds,
// delegates to throw_on_error; whatever that throws is re-wrapped into an
// EnforceNotMet carrying the enforce site's file and line.
#define PADDLE_ENFORCE(COND, ...) \
  do { \
    auto __cond__ = (COND); \
    if (UNLIKELY(::paddle::platform::is_error(__cond__))) { \
      try { \
        ::paddle::platform::throw_on_error( \
            __cond__, ::paddle::string::Sprintf(__VA_ARGS__)); \
      } catch (...) { \
        throw ::paddle::platform::EnforceNotMet(std::current_exception(), \
                                                __FILE__, __LINE__); \
      } \
    } \
  } while (0)
#endif
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
#define PADDLE_ENFORCE_CUDA_SUCCESS(COND, ...) \ #define PADDLE_ENFORCE_CUDA_SUCCESS(COND, ...) \
...@@ -323,7 +513,10 @@ DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess); ...@@ -323,7 +513,10 @@ DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess);
if (UNLIKELY(__cond__ != __success_type__)) { \ if (UNLIKELY(__cond__ != __success_type__)) { \
try { \ try { \
::paddle::platform::throw_on_error( \ ::paddle::platform::throw_on_error( \
__cond__, ::paddle::string::Sprintf(__VA_ARGS__)); \ __cond__, \
::paddle::platform::build_ex_string( \
__cond__, \
::paddle::platform::ErrorSummary(__VA_ARGS__).ToString())); \
} catch (...) { \ } catch (...) { \
throw ::paddle::platform::EnforceNotMet(std::current_exception(), \ throw ::paddle::platform::EnforceNotMet(std::current_exception(), \
__FILE__, __LINE__); \ __FILE__, __LINE__); \
...@@ -332,198 +525,7 @@ DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess); ...@@ -332,198 +525,7 @@ DEFINE_CUDA_STATUS_TYPE(ncclResult_t, ncclSuccess);
} while (0) } while (0)
#undef DEFINE_CUDA_STATUS_TYPE #undef DEFINE_CUDA_STATUS_TYPE
#endif #endif // PADDLE_WITH_CUDA
// Throws EOFException; used by data readers to signal exhausted input.
#define PADDLE_THROW_EOF() \
  do { \
    throw ::paddle::platform::EOFException("There is no next data.", __FILE__, \
                                           __LINE__); \
  } while (0)

// Throws memory::allocation::BadAlloc with a Sprintf-formatted message;
// used by allocators when a requested allocation cannot be satisfied.
#define PADDLE_THROW_BAD_ALLOC(...) \
  do { \
    throw ::paddle::memory::allocation::BadAlloc( \
        ::paddle::string::Sprintf(__VA_ARGS__), __FILE__, __LINE__); \
  } while (0)
/*
 * Some enforce helpers here, usage:
 *    int a = 1;
 *    int b = 2;
 *    PADDLE_ENFORCE_EQ(a, b);
 *
 * will raise an exception described as follows:
 *    "Expected a == b, but received a:1 != b:2."
 * with detailed stack information.
 *
 * Extra messages are also supported, for example:
 *    PADDLE_ENFORCE_EQ(a, b, "some simple enforce failed between %d numbers", 2)
 */

// Throws (via PADDLE_THROW) when __VAL evaluates to nullptr; the variadic
// arguments are formatted and appended after the "should not be null" line.
#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...) \
  do { \
    if (UNLIKELY(nullptr == (__VAL))) { \
      PADDLE_THROW(#__VAL " should not be null\n%s", \
                   ::paddle::string::Sprintf(__VA_ARGS__)); \
    } \
  } while (0)
namespace details {

// True iff T is a built-in arithmetic type (integral or floating point).
template <typename T>
inline constexpr bool IsArithmetic() {
  return std::is_arithmetic<T>::value;
}

// For two arithmetic types, compare through their common type so that e.g.
// int vs. size_t comparisons are well-defined.
template <typename T1, typename T2, bool kIsArithmetic /* = true */>
struct TypeConverterImpl {
  using Type1 = typename std::common_type<T1, T2>::type;
  using Type2 = Type1;
};

// For non-arithmetic operands, leave the types untouched.
template <typename T1, typename T2>
struct TypeConverterImpl<T1, T2, false> {
  using Type1 = T1;
  using Type2 = T2;
};

// Selects the types through which the binary-compare macros cast their
// operands before comparing.
template <typename T1, typename T2>
struct TypeConverter {
 private:
  static constexpr bool kIsArithmetic =
      IsArithmetic<T1>() && IsArithmetic<T2>();

 public:
  using Type1 = typename TypeConverterImpl<T1, T2, kIsArithmetic>::Type1;
  using Type2 = typename TypeConverterImpl<T1, T2, kIsArithmetic>::Type2;
};

// const lvalue-reference forms of the converted types; the macros bind the
// operands to these to avoid copies.
template <typename T1, typename T2>
using CommonType1 = typename std::add_lvalue_reference<
    typename std::add_const<typename TypeConverter<T1, T2>::Type1>::type>::type;

template <typename T1, typename T2>
using CommonType2 = typename std::add_lvalue_reference<
    typename std::add_const<typename TypeConverter<T1, T2>::Type2>::type>::type;

// Here, we use SFINAE to check whether T can be converted to std::string
// (i.e. whether `std::cout << T` is well-formed).
template <typename T>
struct CanToString {
 private:
  using YesType = uint8_t;
  using NoType = uint16_t;

  // Selected when `std::cout << U` compiles (the decltype is a valid type).
  template <typename U>
  static YesType Check(decltype(std::cout << std::declval<U>())) {
    return 0;
  }

  // Fallback overload, selected otherwise.
  template <typename U>
  static NoType Check(...) {
    return 0;
  }

 public:
  static constexpr bool kValue =
      std::is_same<YesType, decltype(Check<T>(std::cout))>::value;
};

// Renders "expr:value" when the value is streamable...
template <bool kCanToString /* = true */>
struct BinaryCompareMessageConverter {
  template <typename T>
  static std::string Convert(const char* expression, const T& value) {
    return expression + std::string(":") + string::to_string(value);
  }
};

// ...and falls back to just the expression text when it is not.
template <>
struct BinaryCompareMessageConverter<false> {
  template <typename T>
  static const char* Convert(const char* expression, const T& value) {
    return expression;
  }
};
}  // namespace details
// Common implementation behind PADDLE_ENFORCE_{EQ,NE,GT,GE,LT,LE}: evaluates
// each operand once, compares through their common type (see details::
// TypeConverter), and on failure throws with both expression texts and, when
// streamable, both values (see details::BinaryCompareMessageConverter).
#define __PADDLE_BINARY_COMPARE(__VAL1, __VAL2, __CMP, __INV_CMP, ...) \
  do { \
    auto __val1 = (__VAL1); \
    auto __val2 = (__VAL2); \
    using __TYPE1__ = decltype(__val1); \
    using __TYPE2__ = decltype(__val2); \
    using __COMMON_TYPE1__ = \
        ::paddle::platform::details::CommonType1<__TYPE1__, __TYPE2__>; \
    using __COMMON_TYPE2__ = \
        ::paddle::platform::details::CommonType2<__TYPE1__, __TYPE2__>; \
    bool __is_not_error = (static_cast<__COMMON_TYPE1__>(__val1))__CMP( \
        static_cast<__COMMON_TYPE2__>(__val2)); \
    if (UNLIKELY(!__is_not_error)) { \
      constexpr bool __kCanToString__ = \
          ::paddle::platform::details::CanToString<__TYPE1__>::kValue && \
          ::paddle::platform::details::CanToString<__TYPE2__>::kValue; \
      PADDLE_THROW("Expected %s " #__CMP " %s, but received %s " #__INV_CMP \
                   " %s.\n%s", \
                   #__VAL1, #__VAL2, \
                   ::paddle::platform::details::BinaryCompareMessageConverter< \
                       __kCanToString__>::Convert(#__VAL1, __val1), \
                   ::paddle::platform::details::BinaryCompareMessageConverter< \
                       __kCanToString__>::Convert(#__VAL2, __val2), \
                   ::paddle::string::Sprintf(__VA_ARGS__)); \
    } \
  } while (0)
// Binary comparison enforcements. Each throws EnforceNotMet when the
// comparison fails; extra Sprintf-style arguments are appended to the message.
#define PADDLE_ENFORCE_EQ(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, ==, !=, __VA_ARGS__)
#define PADDLE_ENFORCE_NE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, !=, ==, __VA_ARGS__)
#define PADDLE_ENFORCE_GT(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >, <=, __VA_ARGS__)
#define PADDLE_ENFORCE_GE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, >=, <, __VA_ARGS__)
#define PADDLE_ENFORCE_LT(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__)
#define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \
  __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__)
// InferShape-aware comparison: at compile (non-runtime) stage a dimension may
// legitimately be -1 (unknown), so such comparisons are skipped via `break`
// instead of raising. Otherwise behaves like __PADDLE_BINARY_COMPARE with the
// values rendered through string::to_string.
#define __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL1, __VAL2, __CMP, \
                                           __INV_CMP, ...) \
  do { \
    auto __val1 = (__VAL1); \
    auto __val2 = (__VAL2); \
    if (!__CTX->IsRuntime()) { \
      if (__val1 == -1 || __val2 == -1) { \
        break; \
      } \
    } \
    using __TYPE1__ = decltype(__val1); \
    using __TYPE2__ = decltype(__val2); \
    using __COMMON_TYPE1__ = \
        ::paddle::platform::details::CommonType1<__TYPE1__, __TYPE2__>; \
    using __COMMON_TYPE2__ = \
        ::paddle::platform::details::CommonType2<__TYPE1__, __TYPE2__>; \
    bool __is_not_error = (static_cast<__COMMON_TYPE1__>(__val1))__CMP( \
        static_cast<__COMMON_TYPE2__>(__val2)); \
    if (UNLIKELY(!__is_not_error)) { \
      PADDLE_THROW("Expected %s " #__CMP " %s, but received %s:%s " #__INV_CMP \
                   " %s:%s.\n%s", \
                   #__VAL1, #__VAL2, #__VAL1, \
                   ::paddle::string::to_string(__val1), #__VAL2, \
                   ::paddle::string::to_string(__val2), \
                   ::paddle::string::Sprintf(__VA_ARGS__)); \
    } \
  } while (0)
// InferShape-aware comparison enforcements; __CTX is an InferShapeContext*
// used to tolerate unknown (-1) dimensions at compile stage.
#define PADDLE_INFERSHAPE_ENFORCE_EQ(__CTX, __VAL0, __VAL1, ...) \
  __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, ==, !=, __VA_ARGS__)
#define PADDLE_INFERSHAPE_ENFORCE_NE(__CTX, __VAL0, __VAL1, ...) \
  __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, !=, ==, __VA_ARGS__)
#define PADDLE_INFERSHAPE_ENFORCE_GT(__CTX, __VAL0, __VAL1, ...) \
  __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, >, <=, __VA_ARGS__)
#define PADDLE_INFERSHAPE_ENFORCE_GE(__CTX, __VAL0, __VAL1, ...) \
  __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, >=, <, __VA_ARGS__)
#define PADDLE_INFERSHAPE_ENFORCE_LT(__CTX, __VAL0, __VAL1, ...) \
  __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, <, >=, __VA_ARGS__)
#define PADDLE_INFERSHAPE_ENFORCE_LE(__CTX, __VAL0, __VAL1, ...) \
  __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, <=, >, __VA_ARGS__)
} // namespace platform } // namespace platform
} // namespace paddle } // namespace paddle
...@@ -87,8 +87,10 @@ TEST(ENFORCE_EQ, EXTRA_MSG_FAIL) { ...@@ -87,8 +87,10 @@ TEST(ENFORCE_EQ, EXTRA_MSG_FAIL) {
} catch (paddle::platform::EnforceNotMet& error) { } catch (paddle::platform::EnforceNotMet& error) {
caught_exception = true; caught_exception = true;
std::string ex_msg = error.what(); std::string ex_msg = error.what();
EXPECT_TRUE(ex_msg.find("Expected a == 1 + 3, but received a:2 != 1 + " EXPECT_TRUE(ex_msg.find("their size not match") != std::string::npos);
"3:4.\ntheir size not match") != std::string::npos); EXPECT_TRUE(
ex_msg.find("Expected a == 1 + 3, but received a:2 != 1 + 3:4.") !=
std::string::npos);
} }
EXPECT_TRUE(caught_exception); EXPECT_TRUE(caught_exception);
} }
......
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package paddle.platform.error;
enum Code {
  // Legacy error.
  // Error type string: "Error"
  LEGACY = 0;

  // Client specified an invalid argument.
  // Error type string: "InvalidArgumentError"
  INVALID_ARGUMENT = 1;

  // Some requested entity (e.g., file or directory) was not found.
  // Error type string: "NotFoundError"
  NOT_FOUND = 2;

  // Operation tried to iterate past the valid input range. E.g., seeking or
  // reading past end of file.
  // Error type string: "OutOfRangeError"
  OUT_OF_RANGE = 3;

  // Some entity that we attempted to create (e.g., file or directory)
  // already exists.
  // Error type string: "AlreadyExistsError"
  ALREADY_EXISTS = 4;

  // Some resource has been exhausted, perhaps a per-user quota, or
  // perhaps the entire file system is out of space.
  // Error type string: "ResourceExhaustedError"
  RESOURCE_EXHAUSTED = 5;

  // Operation was rejected because the system is not in a state
  // required for the operation's execution.
  // Error type string: "PreconditionNotMetError"
  PRECONDITION_NOT_MET = 6;

  // The caller does not have permission to execute the specified
  // operation.
  // Error type string: "PermissionDeniedError"
  PERMISSION_DENIED = 7;

  // Deadline expired before operation could complete.
  // Error type string: "ExecutionTimeoutError"
  EXECUTION_TIMEOUT = 8;

  // Operation is not implemented or not supported/enabled in this service.
  // Error type string: "UnimplementedError"
  UNIMPLEMENTED = 9;

  // The service is currently unavailable. This is most likely a
  // transient condition and may be corrected by retrying with
  // a backoff.
  // Error type string: "UnavailableError"
  UNAVAILABLE = 10;

  // Fatal errors. Means some invariant expected by the underlying
  // system has been broken. If you see one of these errors,
  // something is very broken.
  // Error type string: "FatalError"
  FATAL = 11;

  // Third-party library error.
  // Error type string: "ExternalError"
  EXTERNAL = 12;
}
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/errors.h"
#include <stdexcept>
namespace paddle {
namespace platform {
typedef ::paddle::platform::error::Code Code;
// Maps an error code (declared in error_codes.proto) to the human-readable
// error type name used as the prefix of exception messages, e.g.
// INVALID_ARGUMENT -> "InvalidArgumentError".
//
// Throws std::invalid_argument for a code not declared in error_codes.proto.
// Note: each case returns directly; the previous `break` statements after
// `return` were unreachable and have been removed.
std::string error_name(Code code) {
  switch (code) {
    case paddle::platform::error::LEGACY:
      return "Error";
    case paddle::platform::error::INVALID_ARGUMENT:
      return "InvalidArgumentError";
    case paddle::platform::error::NOT_FOUND:
      return "NotFoundError";
    case paddle::platform::error::OUT_OF_RANGE:
      return "OutOfRangeError";
    case paddle::platform::error::ALREADY_EXISTS:
      return "AlreadyExistsError";
    case paddle::platform::error::RESOURCE_EXHAUSTED:
      return "ResourceExhaustedError";
    case paddle::platform::error::PRECONDITION_NOT_MET:
      return "PreconditionNotMetError";
    case paddle::platform::error::PERMISSION_DENIED:
      return "PermissionDeniedError";
    case paddle::platform::error::EXECUTION_TIMEOUT:
      return "ExecutionTimeoutError";
    case paddle::platform::error::UNIMPLEMENTED:
      return "UnimplementedError";
    case paddle::platform::error::UNAVAILABLE:
      return "UnavailableError";
    case paddle::platform::error::FATAL:
      return "FatalError";
    case paddle::platform::error::EXTERNAL:
      return "ExternalError";
    default:
      throw std::invalid_argument("The error type is undefined.");
  }
}
// Renders the summary as "<ErrorTypeName>: <message>",
// e.g. "InvalidArgumentError: tensor shape mismatch".
std::string ErrorSummary::ToString() const {
  return error_name(code()) + ": " + error_message();
}
} // namespace platform
} // namespace paddle
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>

#include "paddle/fluid/platform/error_codes.pb.h"
#include "paddle/fluid/string/printf.h"
namespace paddle {
namespace platform {
typedef ::paddle::platform::error::Code Code;
// Value type carrying an error code (from error_codes.proto) together with a
// formatted error message; consumed by the PADDLE_ENFORCE_* / PADDLE_THROW
// machinery to build typed exception messages.
class ErrorSummary {
 public:
  // Note(chenweihang): Final deprecated constructor
  // This constructor is only used to be compatible with
  // current existing no error message PADDLE_ENFORCE_*
  ErrorSummary() {
    code_ = paddle::platform::error::LEGACY;
    msg_ =
        "Paddle internal Check failed. (Please help us create a new issue, "
        "here we need to find the developer to add a user friendly error "
        "message)";
  }

  // Note(chenweihang): Final deprecated constructor
  // This constructor is used to be compatible with
  // current existing untyped PADDLE_ENFORCE_*
  // PADDLE_ENFORCE
  template <typename... Args>
  explicit ErrorSummary(Args... args) {
    code_ = paddle::platform::error::LEGACY;
    msg_ = paddle::string::Sprintf(args...);
  }

  // Note(chenweihang): Recommended constructor
  // Takes msg by value and moves it into the member to avoid an extra copy.
  explicit ErrorSummary(Code code, std::string msg)
      : code_(code), msg_(std::move(msg)) {}

  // The typed error code of this summary.
  Code code() const { return code_; }

  // The formatted message, without the error type name prefix.
  const std::string& error_message() const { return msg_; }

  // Renders "<ErrorTypeName>: <message>"; defined in errors.cc.
  std::string ToString() const;

 private:
  Code code_;
  std::string msg_;
};
namespace errors {

// Declares one factory function per error type, e.g.
//   paddle::platform::errors::InvalidArgument("bad dim %d", d)
// returns an ErrorSummary with code INVALID_ARGUMENT and a
// Sprintf-formatted message.
#define REGISTER_ERROR(FUNC, CONST, ...) \
  template <typename... Args> \
  ::paddle::platform::ErrorSummary FUNC(Args... args) { \
    return ::paddle::platform::ErrorSummary( \
        ::paddle::platform::error::CONST, ::paddle::string::Sprintf(args...)); \
  }

REGISTER_ERROR(InvalidArgument, INVALID_ARGUMENT)
REGISTER_ERROR(NotFound, NOT_FOUND)
REGISTER_ERROR(OutOfRange, OUT_OF_RANGE)
REGISTER_ERROR(AlreadyExists, ALREADY_EXISTS)
REGISTER_ERROR(ResourceExhausted, RESOURCE_EXHAUSTED)
REGISTER_ERROR(PreconditionNotMet, PRECONDITION_NOT_MET)
REGISTER_ERROR(PermissionDenied, PERMISSION_DENIED)
REGISTER_ERROR(ExecutionTimeout, EXECUTION_TIMEOUT)
REGISTER_ERROR(Unimplemented, UNIMPLEMENTED)
REGISTER_ERROR(Unavailable, UNAVAILABLE)
REGISTER_ERROR(Fatal, FATAL)
REGISTER_ERROR(External, EXTERNAL)

#undef REGISTER_ERROR

}  // namespace errors
} // namespace platform
} // namespace paddle
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <functional>
#include <string>
#include "gtest/gtest.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"
using namespace paddle::platform::errors; // NOLINT
// Checks that PADDLE_THROW with the given error factory raises EnforceNotMet
// whose message carries the "<EFUNC>Error: ..." prefix.
#define CHECK_PADDLE_THROW(EFUNC) \
  do { \
    bool caught_exception = false; \
    try { \
      PADDLE_THROW((EFUNC)("paddle throw test.")); \
    } catch (paddle::platform::EnforceNotMet & error) { \
      caught_exception = true; \
      std::string ex_msg = error.what(); \
      EXPECT_TRUE(ex_msg.find(#EFUNC "Error: paddle throw test.") != \
                  std::string::npos); \
    } \
    EXPECT_TRUE(caught_exception); \
  } while (0)
// Checks that a failing PADDLE_ENFORCE propagates the typed error message
// produced by the given error factory.
#define CHECK_PADDLE_ENFORCE(EFUNC) \
  do { \
    bool caught_exception = false; \
    try { \
      PADDLE_ENFORCE(false, (EFUNC)("paddle enforce test.")); \
    } catch (paddle::platform::EnforceNotMet & error) { \
      caught_exception = true; \
      std::string ex_msg = error.what(); \
      EXPECT_TRUE(ex_msg.find(#EFUNC "Error: paddle enforce test.") != \
                  std::string::npos); \
    } \
    EXPECT_TRUE(caught_exception); \
  } while (0)
// Checks that PADDLE_ENFORCE_NOT_NULL on nullptr propagates the typed error
// message produced by the given error factory.
#define CHECK_PADDLE_ENFORCE_NOT_NULL(EFUNC) \
  do { \
    bool caught_exception = false; \
    try { \
      PADDLE_ENFORCE_NOT_NULL(nullptr, \
                              (EFUNC)("paddle enforce not null test.")); \
    } catch (paddle::platform::EnforceNotMet & error) { \
      caught_exception = true; \
      std::string ex_msg = error.what(); \
      EXPECT_TRUE( \
          ex_msg.find(#EFUNC "Error: paddle enforce not null test.") != \
          std::string::npos); \
    } \
    EXPECT_TRUE(caught_exception); \
  } while (0)
// Checks that a failing PADDLE_ENFORCE_EQ propagates the typed error message
// produced by the given error factory.
#define CHECK_PADDLE_ENFORCE_EQ(EFUNC) \
  do { \
    bool caught_exception = false; \
    try { \
      PADDLE_ENFORCE_EQ(1, 2, (EFUNC)("paddle enforce equal test.")); \
    } catch (paddle::platform::EnforceNotMet & error) { \
      caught_exception = true; \
      std::string ex_msg = error.what(); \
      EXPECT_TRUE(ex_msg.find(#EFUNC "Error: paddle enforce equal test.") != \
                  std::string::npos); \
    } \
    EXPECT_TRUE(caught_exception); \
  } while (0)
// Runs every exception-macro check above for a single error factory.
#define CHECK_ALL_PADDLE_EXCEPTION_MACRO(EFUNC) \
  do { \
    CHECK_PADDLE_THROW(EFUNC); \
    CHECK_PADDLE_ENFORCE(EFUNC); \
    CHECK_PADDLE_ENFORCE_NOT_NULL(EFUNC); \
    CHECK_PADDLE_ENFORCE_EQ(EFUNC); \
  } while (0)
// One test per declared error type: each verifies that the corresponding
// factory yields the expected "<Type>Error: ..." prefix through PADDLE_THROW,
// PADDLE_ENFORCE, PADDLE_ENFORCE_NOT_NULL and PADDLE_ENFORCE_EQ.
TEST(Errors, InvalidArgument) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(InvalidArgument);
}

TEST(Errors, NotFound) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(NotFound); }

TEST(Errors, OutOfRange) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(OutOfRange); }

// Fixed test-name typo: "AlreadExists" -> "AlreadyExists".
TEST(Errors, AlreadyExists) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(AlreadyExists); }

TEST(Errors, ResourceExhausted) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(ResourceExhausted);
}

TEST(Errors, PreconditionNotMet) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(PreconditionNotMet);
}

TEST(Errors, PermissionDenied) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(PermissionDenied);
}

TEST(Errors, ExecutionTimeout) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(ExecutionTimeout);
}

TEST(Errors, Unimplemented) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(Unimplemented); }

TEST(Errors, Unavailable) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(Unavailable); }

TEST(Errors, Fatal) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(Fatal); }

TEST(Errors, External) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(External); }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册