未验证 提交 974b8a83 编写于 作者: C Chen Weihang 提交者: GitHub

Cherry-pick error type support for release1.6 (#21294)

* delete paddle infershape enforce marco (#20832)

* Polish and arrange code in enforce.h (#20901)

* Enrich the type of error and declare the error type interfaces (#21024)

* Enrich the type of error and declare the error type interfaces, test=develop

* adjust tests to adapt new form, test=develop

* add inference deps with error_codes.pb.h, test=develop

* restore stack iter start pos, test=develop

* polish code based review comments, test=develop

* Add dependency for error_codes.proto (#21084)

* fix activation_functions deps, test=develop, test=document_fix

* add error_codes_proto deps, test=develop, test=document_fix

* try delete enforce.h, test=develop, test=document_fix

* change cuda enforce & add example (#21142)
test=release/1.6
上级 7ab85396
...@@ -223,8 +223,8 @@ set(module "platform") ...@@ -223,8 +223,8 @@ set(module "platform")
set(platform_lib_deps profiler_proto) set(platform_lib_deps profiler_proto)
add_dependencies(fluid_lib_dist ${platform_lib_deps}) add_dependencies(fluid_lib_dist ${platform_lib_deps})
copy(fluid_lib_dist copy(fluid_lib_dist
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/dynload/*.h ${src_dir}/${module}/details/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/platform/profiler.pb.h SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/dynload/*.h ${src_dir}/${module}/details/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/platform/profiler.pb.h ${PADDLE_BINARY_DIR}/paddle/fluid/platform/error_codes.pb.h
DSTS ${dst_dir}/${module} ${dst_dir}/${module}/dynload ${dst_dir}/${module}/details ${dst_dir}/${module} DSTS ${dst_dir}/${module} ${dst_dir}/${module}/dynload ${dst_dir}/${module}/details ${dst_dir}/${module} ${dst_dir}/${module}
) )
set(module "string") set(module "string")
......
...@@ -39,8 +39,8 @@ TEST(Tensor, DataAssert) { ...@@ -39,8 +39,8 @@ TEST(Tensor, DataAssert) {
} catch (platform::EnforceNotMet& err) { } catch (platform::EnforceNotMet& err) {
caught = true; caught = true;
std::string ex_msg = err.what(); std::string ex_msg = err.what();
EXPECT_TRUE(ex_msg.find("holder_ should not be null\nTensor holds no " EXPECT_TRUE(ex_msg.find("holder_ should not be null") != std::string::npos);
"memory. Call " EXPECT_TRUE(ex_msg.find("Tensor holds no memory. Call "
"Tensor::mutable_data first.") != "Tensor::mutable_data first.") !=
std::string::npos); std::string::npos);
} }
...@@ -154,8 +154,9 @@ TEST(Tensor, ShareDataWith) { ...@@ -154,8 +154,9 @@ TEST(Tensor, ShareDataWith) {
} catch (paddle::platform::EnforceNotMet& err) { } catch (paddle::platform::EnforceNotMet& err) {
caught = true; caught = true;
std::string ex_msg = err.what(); std::string ex_msg = err.what();
EXPECT_TRUE(ex_msg.find("holder_ should not be null\nTensor holds no " EXPECT_TRUE(ex_msg.find("holder_ should not be null") !=
"memory. Call " std::string::npos);
EXPECT_TRUE(ex_msg.find("Tensor holds no memory. Call "
"Tensor::mutable_data first.") != "Tensor::mutable_data first.") !=
std::string::npos); std::string::npos);
} }
......
...@@ -81,7 +81,8 @@ class CUDADeviceContextAllocator : public Allocator { ...@@ -81,7 +81,8 @@ class CUDADeviceContextAllocator : public Allocator {
platform::CUDADeviceGuard guard(place_.device); platform::CUDADeviceGuard guard(place_.device);
PADDLE_ENFORCE_CUDA_SUCCESS( PADDLE_ENFORCE_CUDA_SUCCESS(
cudaEventCreate(&event_, cudaEventDisableTiming), cudaEventCreate(&event_, cudaEventDisableTiming),
"Create event failed in CUDADeviceContextAllocator"); platform::errors::External(
"Create event failed in CUDADeviceContextAllocator"));
} }
~CUDADeviceContextAllocator() { ~CUDADeviceContextAllocator() {
......
...@@ -184,31 +184,35 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { ...@@ -184,31 +184,35 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
true, true,
"The Input(Label) should be a 3-D tensor with last " "The Input(Label) should be a 3-D tensor with last "
"dimension fixed to 1 or a 2-D tensor in padding mode."); "dimension fixed to 1 or a 2-D tensor in padding mode.");
PADDLE_INFERSHAPE_ENFORCE_EQ( if (ctx->IsRuntime()) {
ctx, emission_dims[0], label_dims[0], PADDLE_ENFORCE_EQ(emission_dims[0], label_dims[0],
"The batch size of Input(Emission) and Input(Label) " "The batch size of Input(Emission) and Input(Label) "
"should be the same."); "should be the same.");
PADDLE_INFERSHAPE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(emission_dims[1], label_dims[1],
ctx, emission_dims[1], label_dims[1],
"The max length of Input(Emission) and Input(Label) " "The max length of Input(Emission) and Input(Label) "
"should be the same."); "should be the same.");
}
} else { } else {
PADDLE_ENFORCE_EQ(emission_dims.size(), 2, PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
"The Input(Emission) should be a 2-D tensor."); "The Input(Emission) should be a 2-D tensor.");
PADDLE_INFERSHAPE_ENFORCE_EQ( if (ctx->IsRuntime()) {
ctx, emission_dims[1], transition_dims[1], PADDLE_ENFORCE_EQ(emission_dims[1], transition_dims[1],
"The 2nd dimension of the Input(Emission) and the Input(Transition) " "The 2nd dimension of the Input(Emission) and the "
"Input(Transition) "
"should be equal to the tag number."); "should be equal to the tag number.");
}
auto label_dims = ctx->GetInputDim("Label"); auto label_dims = ctx->GetInputDim("Label");
PADDLE_ENFORCE_EQ(label_dims.size(), 2, PADDLE_ENFORCE_EQ(label_dims.size(), 2,
"The Input(Label) should be a 2-D tensor with the 2nd " "The Input(Label) should be a 2-D tensor with the 2nd "
"dimensions fixed to 1."); "dimensions fixed to 1.");
PADDLE_INFERSHAPE_ENFORCE_EQ( if (ctx->IsRuntime()) {
ctx, emission_dims[0], label_dims[0], PADDLE_ENFORCE_EQ(
emission_dims[0], label_dims[0],
"The height of Input(Emission) and the height of Input(Label) " "The height of Input(Emission) and the height of Input(Label) "
"should be the same."); "should be the same.");
} }
}
ctx->SetOutputDim("Alpha", emission_dims); ctx->SetOutputDim("Alpha", emission_dims);
ctx->SetOutputDim("EmissionExps", emission_dims); ctx->SetOutputDim("EmissionExps", emission_dims);
ctx->SetOutputDim("TransitionExps", transition_dims); ctx->SetOutputDim("TransitionExps", transition_dims);
......
...@@ -14,9 +14,9 @@ limitations under the License. */ ...@@ -14,9 +14,9 @@ limitations under the License. */
#pragma once #pragma once
#include <math.h> #include <math.h>
#include <stdexcept>
#include <string> #include <string>
#include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/hostdevice.h"
namespace paddle { namespace paddle {
...@@ -45,7 +45,7 @@ inline ActivationType GetActivationType(const std::string &type) { ...@@ -45,7 +45,7 @@ inline ActivationType GetActivationType(const std::string &type) {
} else if (type == "identity" || type == "") { } else if (type == "identity" || type == "") {
return ActivationType::kIdentity; return ActivationType::kIdentity;
} }
PADDLE_THROW("Not support type %s.", type); throw std::invalid_argument("The input type is not supported");
} }
namespace forward { namespace forward {
......
...@@ -45,19 +45,20 @@ class AccuracyOp : public framework::OperatorWithKernel { ...@@ -45,19 +45,20 @@ class AccuracyOp : public framework::OperatorWithKernel {
"ShapeError: label's dimensions of AccuracyOp must be 2. " "ShapeError: label's dimensions of AccuracyOp must be 2. "
"But received label's dimensions = %d, label's shape = [%s]", "But received label's dimensions = %d, label's shape = [%s]",
label_dim.size(), label_dim); label_dim.size(), label_dim);
PADDLE_INFERSHAPE_ENFORCE_EQ( if (ctx->IsRuntime()) {
ctx, label_dim[1], 1, PADDLE_ENFORCE_EQ(label_dim[1], 1,
"ShapeError: label's second dimension of " "ShapeError: label's second dimension of "
"AccuracyOp must be 1. But received label's " "AccuracyOp must be 1. But received label's "
"second dimension is = %d, label's shape = [%s]", "second dimension is = %d, label's shape = [%s]",
label_dim[1], label_dim); label_dim[1], label_dim);
PADDLE_INFERSHAPE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
ctx, inference_dim[0], label_dim[0], inference_dim[0], label_dim[0],
"ShapeError: the output's num_rows of AccuracyOp must be" "ShapeError: the output's num_rows of AccuracyOp must be"
" the same as label's num_rows. But received output's " " the same as label's num_rows. But received output's "
"shape = [%s], label's shape = [%s], output's num_rows = %d, label's " "shape = [%s], label's shape = [%s], output's num_rows = %d, label's "
"num_rows = %d", "num_rows = %d",
inference_dim, label_dim, inference_dim[0], label_dim[0]); inference_dim, label_dim, inference_dim[0], label_dim[0]);
}
ctx->SetOutputDim("Accuracy", {1}); ctx->SetOutputDim("Accuracy", {1});
ctx->SetOutputDim("Correct", {1}); ctx->SetOutputDim("Correct", {1});
......
...@@ -28,14 +28,18 @@ class AucOp : public framework::OperatorWithKernel { ...@@ -28,14 +28,18 @@ class AucOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE(ctx->HasInput("Label"), PADDLE_ENFORCE(ctx->HasInput("Label"),
"Input of Label should not be null."); "Input of Label should not be null.");
auto predict_width = ctx->GetInputDim("Predict")[1]; auto predict_width = ctx->GetInputDim("Predict")[1];
PADDLE_INFERSHAPE_ENFORCE_LE(ctx, predict_width, 2, if (ctx->IsRuntime()) {
PADDLE_ENFORCE_LE(predict_width, 2,
"Only support binary classification," "Only support binary classification,"
"prediction dims[1] should be 1 or 2"); "prediction dims[1] should be 1 or 2");
}
auto predict_height = ctx->GetInputDim("Predict")[0]; auto predict_height = ctx->GetInputDim("Predict")[0];
auto label_height = ctx->GetInputDim("Label")[0]; auto label_height = ctx->GetInputDim("Label")[0];
PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, predict_height, label_height, if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(predict_height, label_height,
"Out and Label should have same height."); "Out and Label should have same height.");
}
int num_pred_buckets = ctx->Attrs().Get<int>("num_thresholds") + 1; int num_pred_buckets = ctx->Attrs().Get<int>("num_thresholds") + 1;
int slide_steps = ctx->Attrs().Get<int>("slide_steps"); int slide_steps = ctx->Attrs().Get<int>("slide_steps");
......
...@@ -48,32 +48,41 @@ class MulOp : public framework::OperatorWithKernel { ...@@ -48,32 +48,41 @@ class MulOp : public framework::OperatorWithKernel {
<< " y_num_col_dims=" << y_num_col_dims; << " y_num_col_dims=" << y_num_col_dims;
PADDLE_ENFORCE_NE(framework::product(y_dims), 0, PADDLE_ENFORCE_NE(framework::product(y_dims), 0,
platform::errors::PreconditionNotMet(
"Maybe the Input variable Y(%s) has not " "Maybe the Input variable Y(%s) has not "
"been initialized. You may need to confirm " "been initialized. You may need to confirm "
"if you put exe.run(startup_program) " "if you put exe.run(startup_program) "
"after optimizer.minimize function.", "after optimizer.minimize function.",
ctx->Inputs("Y").front()); ctx->Inputs("Y").front()));
PADDLE_ENFORCE_GT(x_dims.size(), x_num_col_dims, PADDLE_ENFORCE_GT(
"ShapeError: The input tensor X's dimensions of MulOp " x_dims.size(), x_num_col_dims,
platform::errors::InvalidArgument(
"The input tensor X's dimensions of MulOp "
"should be larger than x_num_col_dims. But received X's " "should be larger than x_num_col_dims. But received X's "
"dimensions = %d, X's shape = [%s], x_num_col_dims = %d.", "dimensions = %d, X's shape = [%s], x_num_col_dims = %d.",
x_dims.size(), x_dims, x_num_col_dims); x_dims.size(), x_dims, x_num_col_dims));
PADDLE_ENFORCE_GT(y_dims.size(), y_num_col_dims, PADDLE_ENFORCE_GT(
"ShapeError: The input tensor Y's dimensions of MulOp " y_dims.size(), y_num_col_dims,
platform::errors::InvalidArgument(
"The input tensor Y's dimensions of MulOp "
"should be larger than y_num_col_dims. But received Y's " "should be larger than y_num_col_dims. But received Y's "
"dimensions = %d, Y's shape = [%s], y_num_col_dims = %d.", "dimensions = %d, Y's shape = [%s], y_num_col_dims = %d.",
y_dims.size(), y_dims, y_num_col_dims); y_dims.size(), y_dims, y_num_col_dims));
auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims); auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims); auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
x_mat_dims[1], y_mat_dims[0], x_mat_dims[1], y_mat_dims[0],
"ShapeError: After flatten the input tensor X and Y to 2-D dimensions " platform::errors::InvalidArgument(
"After flatten the input tensor X and Y to 2-D dimensions "
"matrix X1 and Y1, the matrix X1's width must be equal with matrix " "matrix X1 and Y1, the matrix X1's width must be equal with matrix "
"Y1's height. But received X's shape = [%s], X1's shape = [%s], X1's " "Y1's height. But received X's shape = [%s], X1's shape = [%s], "
"width = %s; Y's shape = [%s], Y1's shape = [%s], Y1's height = %s.", "X1's "
x_dims, x_mat_dims, x_mat_dims[1], y_dims, y_mat_dims, y_mat_dims[0]); "width = %s; Y's shape = [%s], Y1's shape = [%s], Y1's height = "
"%s.",
x_dims, x_mat_dims, x_mat_dims[1], y_dims, y_mat_dims,
y_mat_dims[0]));
std::vector<int64_t> output_dims; std::vector<int64_t> output_dims;
output_dims.reserve( output_dims.reserve(
static_cast<size_t>(x_num_col_dims + y_dims.size() - y_num_col_dims)); static_cast<size_t>(x_num_col_dims + y_dims.size() - y_num_col_dims));
......
...@@ -135,11 +135,13 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { ...@@ -135,11 +135,13 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_GE(out_dims.size(), 2, PADDLE_ENFORCE_GE(out_dims.size(), 2,
"The tensor rank of Input(Out@Grad) should be 2."); "The tensor rank of Input(Out@Grad) should be 2.");
PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[0], in_dims[0], if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(out_dims[0], in_dims[0],
"The 1st dimension of Input(Out@Grad) must be " "The 1st dimension of Input(Out@Grad) must be "
"same as input."); "same as input.");
PADDLE_INFERSHAPE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(out_dims[1], 1,
ctx, out_dims[1], 1, "The 2nd dimension of Input(Out@Grad) must be 1."); "The 2nd dimension of Input(Out@Grad) must be 1.");
}
auto x_grad_name = framework::GradVarName("X"); auto x_grad_name = framework::GradVarName("X");
auto y_grad_name = framework::GradVarName("Y"); auto y_grad_name = framework::GradVarName("Y");
......
...@@ -137,12 +137,14 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { ...@@ -137,12 +137,14 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel {
auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
auto x_dims = ctx->GetInputDim("X"); auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y"); auto y_dims = ctx->GetInputDim("Y");
PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[0], x_dims[0], if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0],
"First dimension of output gradient and " "First dimension of output gradient and "
"input value must be equal."); "input value must be equal.");
PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[1], 1, PADDLE_ENFORCE_EQ(out_dims[1], 1,
"Second dimension of output gradient " "Second dimension of output gradient "
"must be 1."); "must be 1.");
}
auto x_grad_name = framework::GradVarName("X"); auto x_grad_name = framework::GradVarName("X");
auto y_grad_name = framework::GradVarName("Y"); auto y_grad_name = framework::GradVarName("Y");
if (ctx->HasOutput(x_grad_name)) ctx->SetOutputDim(x_grad_name, x_dims); if (ctx->HasOutput(x_grad_name)) ctx->SetOutputDim(x_grad_name, x_dims);
......
proto_library(profiler_proto SRCS profiler.proto DEPS framework_proto simple_threadpool) proto_library(profiler_proto SRCS profiler.proto DEPS framework_proto simple_threadpool)
py_proto_compile(profiler_py_proto SRCS profiler.proto) py_proto_compile(profiler_py_proto SRCS profiler.proto)
proto_library(error_codes_proto SRCS error_codes.proto)
add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
add_dependencies(profiler_py_proto profiler_py_proto_init) add_dependencies(profiler_py_proto profiler_py_proto_init)
if (NOT WIN32) if (NOT WIN32)
...@@ -22,10 +22,13 @@ endif(NOT WIN32) ...@@ -22,10 +22,13 @@ endif(NOT WIN32)
cc_library(flags SRCS flags.cc DEPS gflags) cc_library(flags SRCS flags.cc DEPS gflags)
cc_library(errors SRCS errors.cc DEPS error_codes_proto)
cc_test(errors_test SRCS errors_test.cc DEPS errors enforce)
if(WITH_GPU) if(WITH_GPU)
nv_library(enforce SRCS enforce.cc DEPS flags) nv_library(enforce SRCS enforce.cc DEPS flags errors)
else() else()
cc_library(enforce SRCS enforce.cc DEPS flags) cc_library(enforce SRCS enforce.cc DEPS flags errors)
endif() endif()
cc_test(enforce_test SRCS enforce_test.cc DEPS stringpiece enforce) cc_test(enforce_test SRCS enforce_test.cc DEPS stringpiece enforce)
......
此差异已折叠。
...@@ -87,8 +87,10 @@ TEST(ENFORCE_EQ, EXTRA_MSG_FAIL) { ...@@ -87,8 +87,10 @@ TEST(ENFORCE_EQ, EXTRA_MSG_FAIL) {
} catch (paddle::platform::EnforceNotMet& error) { } catch (paddle::platform::EnforceNotMet& error) {
caught_exception = true; caught_exception = true;
std::string ex_msg = error.what(); std::string ex_msg = error.what();
EXPECT_TRUE(ex_msg.find("Expected a == 1 + 3, but received a:2 != 1 + " EXPECT_TRUE(ex_msg.find("their size not match") != std::string::npos);
"3:4.\ntheir size not match") != std::string::npos); EXPECT_TRUE(
ex_msg.find("Expected a == 1 + 3, but received a:2 != 1 + 3:4.") !=
std::string::npos);
} }
EXPECT_TRUE(caught_exception); EXPECT_TRUE(caught_exception);
} }
......
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package paddle.platform.error;
// Typed error codes attached to every Paddle error summary. The
// "Error type string" noted on each value is the prefix rendered in the
// exception message (see error_name() in errors.cc).
enum Code {
  // Legacy error.
  // Error type string: "Error"
  LEGACY = 0;

  // Client specified an invalid argument.
  // Error type string: "InvalidArgumentError"
  INVALID_ARGUMENT = 1;

  // Some requested entity (e.g., file or directory) was not found.
  // Error type string: "NotFoundError"
  NOT_FOUND = 2;

  // Operation tried to iterate past the valid input range. E.g., seeking or
  // reading past end of file.
  // Error type string: "OutOfRangeError"
  OUT_OF_RANGE = 3;

  // Some entity that we attempted to create (e.g., file or directory)
  // already exists.
  // Error type string: "AlreadyExistsError"
  ALREADY_EXISTS = 4;

  // Some resource has been exhausted, perhaps a per-user quota, or
  // perhaps the entire file system is out of space.
  // Error type string: "ResourceExhaustedError"
  RESOURCE_EXHAUSTED = 5;

  // Operation was rejected because the system is not in a state
  // required for the operation's execution.
  // Error type string: "PreconditionNotMetError"
  PRECONDITION_NOT_MET = 6;

  // The caller does not have permission to execute the specified
  // operation.
  // Error type string: "PermissionDeniedError"
  PERMISSION_DENIED = 7;

  // Deadline expired before operation could complete.
  // Error type string: "ExecutionTimeoutError"
  EXECUTION_TIMEOUT = 8;

  // Operation is not implemented or not supported/enabled in this service.
  // Error type string: "UnimplementedError"
  UNIMPLEMENTED = 9;

  // The service is currently unavailable. This is most likely a
  // transient condition and may be corrected by retrying with
  // a backoff.
  // Error type string: "UnavailableError"
  UNAVAILABLE = 10;

  // Fatal errors. Means some invariant expected by the underlying
  // system has been broken. If you see one of these errors,
  // something is very broken.
  // Error type string: "FatalError"
  FATAL = 11;

  // Third-party library error.
  // Error type string: "ExternalError"
  EXTERNAL = 12;
}
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/errors.h"
#include <stdexcept>
namespace paddle {
namespace platform {
typedef ::paddle::platform::error::Code Code;
// Maps a typed error code to its error-type name, used as the prefix of
// the rendered exception message (e.g. "InvalidArgumentError: ...").
//
// Throws std::invalid_argument if `code` is not one of the values declared
// in error_codes.proto.
//
// Note: each `case` returns directly; the original trailing `break`
// statements after `return` were unreachable and have been removed.
std::string error_name(Code code) {
  switch (code) {
    case paddle::platform::error::LEGACY:
      return "Error";
    case paddle::platform::error::INVALID_ARGUMENT:
      return "InvalidArgumentError";
    case paddle::platform::error::NOT_FOUND:
      return "NotFoundError";
    case paddle::platform::error::OUT_OF_RANGE:
      return "OutOfRangeError";
    case paddle::platform::error::ALREADY_EXISTS:
      return "AlreadyExistsError";
    case paddle::platform::error::RESOURCE_EXHAUSTED:
      return "ResourceExhaustedError";
    case paddle::platform::error::PRECONDITION_NOT_MET:
      return "PreconditionNotMetError";
    case paddle::platform::error::PERMISSION_DENIED:
      return "PermissionDeniedError";
    case paddle::platform::error::EXECUTION_TIMEOUT:
      return "ExecutionTimeoutError";
    case paddle::platform::error::UNIMPLEMENTED:
      return "UnimplementedError";
    case paddle::platform::error::UNAVAILABLE:
      return "UnavailableError";
    case paddle::platform::error::FATAL:
      return "FatalError";
    case paddle::platform::error::EXTERNAL:
      return "ExternalError";
    default:
      throw std::invalid_argument("The error type is undefined.");
  }
}
// Render the summary as "<ErrorTypeName>: <message>".
std::string ErrorSummary::ToString() const {
  return error_name(code()) + ": " + error_message();
}
} // namespace platform
} // namespace paddle
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>

#include "paddle/fluid/platform/error_codes.pb.h"
#include "paddle/fluid/string/printf.h"
namespace paddle {
namespace platform {
typedef ::paddle::platform::error::Code Code;
// Carries a typed error code (see error_codes.proto) plus a formatted,
// human-readable message. This is the payload every typed errors::*
// factory produces and every PADDLE_ENFORCE_* / PADDLE_THROW consumes.
class ErrorSummary {
 public:
  // Note(chenweihang): Final deprecated constructor
  // This constructor is only used to be compatible with
  // current existing no error message PADDLE_ENFORCE_*
  ErrorSummary() {
    code_ = paddle::platform::error::LEGACY;
    msg_ =
        "Paddle internal Check failed. (Please help us create a new issue, "
        "here we need to find the developer to add a user friendly error "
        "message)";
  }

  // Note(chenweihang): Final deprecated constructor
  // This constructor is used to be compatible with
  // current existing untyped PADDLE_ENFORCE_*
  // PADDLE_ENFORCE
  template <typename... Args>
  explicit ErrorSummary(Args... args) {
    code_ = paddle::platform::error::LEGACY;
    msg_ = paddle::string::Sprintf(args...);
  }

  // Note(chenweihang): Recommended constructor
  // Takes msg by value and moves it into the member, so callers passing a
  // temporary (the common case via errors::* factories) avoid a copy.
  explicit ErrorSummary(Code code, std::string msg)
      : code_(code), msg_(std::move(msg)) {}

  // The typed error code.
  Code code() const { return code_; }

  // The formatted message, without the error-type prefix.
  const std::string& error_message() const { return msg_; }

  // Renders "<ErrorTypeName>: <message>" (implemented in errors.cc).
  std::string ToString() const;

 private:
  Code code_;
  std::string msg_;
};
namespace errors {
// REGISTER_ERROR expands to a variadic factory function named FUNC that
// builds an ErrorSummary tagged with error code CONST, formatting the
// message arguments with paddle::string::Sprintf.
#define REGISTER_ERROR(FUNC, CONST, ...)                                      \
  template <typename... Args>                                                 \
  ::paddle::platform::ErrorSummary FUNC(Args... args) {                       \
    return ::paddle::platform::ErrorSummary(                                  \
        ::paddle::platform::error::CONST, ::paddle::string::Sprintf(args...)); \
  }

// One factory per error type declared in error_codes.proto, e.g.
// errors::InvalidArgument("expected %d, got %d", a, b).
REGISTER_ERROR(InvalidArgument, INVALID_ARGUMENT)
REGISTER_ERROR(NotFound, NOT_FOUND)
REGISTER_ERROR(OutOfRange, OUT_OF_RANGE)
REGISTER_ERROR(AlreadyExists, ALREADY_EXISTS)
REGISTER_ERROR(ResourceExhausted, RESOURCE_EXHAUSTED)
REGISTER_ERROR(PreconditionNotMet, PRECONDITION_NOT_MET)
REGISTER_ERROR(PermissionDenied, PERMISSION_DENIED)
REGISTER_ERROR(ExecutionTimeout, EXECUTION_TIMEOUT)
REGISTER_ERROR(Unimplemented, UNIMPLEMENTED)
REGISTER_ERROR(Unavailable, UNAVAILABLE)
REGISTER_ERROR(Fatal, FATAL)
REGISTER_ERROR(External, EXTERNAL)

// Keep the helper macro local to this header.
#undef REGISTER_ERROR
} // namespace errors
} // namespace platform
} // namespace paddle
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <functional>
#include <string>
#include "gtest/gtest.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"
using namespace paddle::platform::errors; // NOLINT
// Checks that PADDLE_THROW with a typed error summary raises EnforceNotMet
// whose message is prefixed with "<EFUNC>Error: ".
#define CHECK_PADDLE_THROW(EFUNC)                                        \
  do {                                                                   \
    bool caught_exception = false;                                       \
    try {                                                                \
      PADDLE_THROW((EFUNC)("paddle throw test."));                       \
    } catch (paddle::platform::EnforceNotMet & error) {                  \
      caught_exception = true;                                           \
      std::string ex_msg = error.what();                                 \
      EXPECT_TRUE(ex_msg.find(#EFUNC "Error: paddle throw test.") !=     \
                  std::string::npos);                                    \
    }                                                                    \
    EXPECT_TRUE(caught_exception);                                       \
  } while (0)

// Same check for the untyped PADDLE_ENFORCE(cond, summary) form, with a
// condition that always fails.
#define CHECK_PADDLE_ENFORCE(EFUNC)                                      \
  do {                                                                   \
    bool caught_exception = false;                                       \
    try {                                                                \
      PADDLE_ENFORCE(false, (EFUNC)("paddle enforce test."));            \
    } catch (paddle::platform::EnforceNotMet & error) {                  \
      caught_exception = true;                                           \
      std::string ex_msg = error.what();                                 \
      EXPECT_TRUE(ex_msg.find(#EFUNC "Error: paddle enforce test.") !=   \
                  std::string::npos);                                    \
    }                                                                    \
    EXPECT_TRUE(caught_exception);                                       \
  } while (0)

// Same check for PADDLE_ENFORCE_NOT_NULL, triggered with a nullptr.
#define CHECK_PADDLE_ENFORCE_NOT_NULL(EFUNC)                             \
  do {                                                                   \
    bool caught_exception = false;                                       \
    try {                                                                \
      PADDLE_ENFORCE_NOT_NULL(nullptr,                                   \
                              (EFUNC)("paddle enforce not null test.")); \
    } catch (paddle::platform::EnforceNotMet & error) {                  \
      caught_exception = true;                                           \
      std::string ex_msg = error.what();                                 \
      EXPECT_TRUE(                                                       \
          ex_msg.find(#EFUNC "Error: paddle enforce not null test.") !=  \
          std::string::npos);                                            \
    }                                                                    \
    EXPECT_TRUE(caught_exception);                                       \
  } while (0)

// Same check for PADDLE_ENFORCE_EQ, triggered with 1 != 2.
#define CHECK_PADDLE_ENFORCE_EQ(EFUNC)                                   \
  do {                                                                   \
    bool caught_exception = false;                                       \
    try {                                                                \
      PADDLE_ENFORCE_EQ(1, 2, (EFUNC)("paddle enforce equal test."));    \
    } catch (paddle::platform::EnforceNotMet & error) {                  \
      caught_exception = true;                                           \
      std::string ex_msg = error.what();                                 \
      EXPECT_TRUE(ex_msg.find(#EFUNC "Error: paddle enforce equal test.") != \
                  std::string::npos);                                    \
    }                                                                    \
    EXPECT_TRUE(caught_exception);                                       \
  } while (0)

// Drives one typed error factory through all four exception macros above.
#define CHECK_ALL_PADDLE_EXCEPTION_MACRO(EFUNC) \
  do {                                          \
    CHECK_PADDLE_THROW(EFUNC);                  \
    CHECK_PADDLE_ENFORCE(EFUNC);                \
    CHECK_PADDLE_ENFORCE_NOT_NULL(EFUNC);       \
    CHECK_PADDLE_ENFORCE_EQ(EFUNC);             \
  } while (0)
// One test per typed error factory declared in errors.h; each exercises
// the factory through PADDLE_THROW, PADDLE_ENFORCE, PADDLE_ENFORCE_NOT_NULL
// and PADDLE_ENFORCE_EQ via CHECK_ALL_PADDLE_EXCEPTION_MACRO.
TEST(Errors, InvalidArgument) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(InvalidArgument);
}
TEST(Errors, NotFound) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(NotFound); }
TEST(Errors, OutOfRange) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(OutOfRange); }
// Fixed typo in the test name (was "AlreadExists").
TEST(Errors, AlreadyExists) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(AlreadyExists); }
TEST(Errors, ResourceExhausted) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(ResourceExhausted);
}
TEST(Errors, PreconditionNotMet) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(PreconditionNotMet);
}
TEST(Errors, PermissionDenied) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(PermissionDenied);
}
TEST(Errors, ExecutionTimeout) {
  CHECK_ALL_PADDLE_EXCEPTION_MACRO(ExecutionTimeout);
}
TEST(Errors, Unimplemented) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(Unimplemented); }
TEST(Errors, Unavailable) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(Unavailable); }
TEST(Errors, Fatal) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(Fatal); }
TEST(Errors, External) { CHECK_ALL_PADDLE_EXCEPTION_MACRO(External); }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册