提交 f812de2c 编写于 作者: L liaogang

ENH: unify PADDLE_ENFORCE

上级 48cf64e8
...@@ -5,7 +5,6 @@ nv_test(dim_test SRCS dim_test.cu DEPS ddim) ...@@ -5,7 +5,6 @@ nv_test(dim_test SRCS dim_test.cu DEPS ddim)
cc_test(tensor_test SRCS tensor_test.cc DEPS ddim) cc_test(tensor_test SRCS tensor_test.cc DEPS ddim)
cc_test(variable_test SRCS variable_test.cc) cc_test(variable_test SRCS variable_test.cc)
cc_test(scope_test SRCS scope_test.cc) cc_test(scope_test SRCS scope_test.cc)
cc_test(enforce_test SRCS enforce_test.cc)
proto_library(attr_type SRCS attr_type.proto) proto_library(attr_type SRCS attr_type.proto)
proto_library(op_proto SRCS op_proto.proto DEPS attr_type) proto_library(op_proto SRCS op_proto.proto DEPS attr_type)
cc_test(op_proto_test SRCS op_proto_test.cc DEPS op_proto protobuf) cc_test(op_proto_test SRCS op_proto_test.cc DEPS op_proto protobuf)
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#include <string> #include <string>
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "paddle/framework/enforce.h" #include "paddle/platform/enforce.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <paddle/string/printf.h>
#include <exception>
#include <sstream>
namespace paddle {
namespace framework {
/**
* @brief Enforce exception. Inherits std::exception
*
* All enforce condition not met, will throw an EnforceNotMet exception.
*/
/**
 * @brief Exception thrown when a PADDLE_ENFORCE condition is not met.
 *
 * Stores the user message followed by the source location, formatted
 * exactly as "<msg> at [<file>:<line>];".
 */
class EnforceNotMet : public std::exception {
 public:
  EnforceNotMet(const std::string& msg, const char* file, int fileline) {
    all_msg_ = msg + " at [" + file + ":" + std::to_string(fileline) + "];";
  }

  // Full formatted message; pointer is valid for the lifetime of this object.
  const char* what() const noexcept override { return all_msg_.c_str(); }

 private:
  std::string all_msg_;
};
// From https://stackoverflow.com/questions/30130930/
// __builtin_expect is a GCC/Clang builtin (not standard C++): it tells the
// compiler that the enforced condition is expected to hold in the common
// case, so the failure branch can be laid out off the fast path and the
// success path compiles to faster code.
#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)
/**
 * @brief Throw an EnforceNotMet exception, automatically filled with the
 * __FILE__ & __LINE__ of the throw site.
 *
 * Takes printf-style __VA_ARGS__; the arguments are formatted into the
 * exception message via ::paddle::string::Sprintf.
 */
#define PADDLE_THROW(...) \
  do { \
    throw ::paddle::framework::EnforceNotMet( \
        ::paddle::string::Sprintf(__VA_ARGS__), __FILE__, __LINE__); \
  } while (0)
/**
 * @brief Enforce a condition, otherwise throw an EnforceNotMet carrying the
 * formatted message. Wrapped in do/while(0) so it behaves as one statement
 * (safe inside unbraced if/else).
 */
#define PADDLE_ENFORCE(condition, ...) \
  do { \
    if (UNLIKELY(!(condition))) { \
      PADDLE_THROW(__VA_ARGS__); \
    } \
  } while (0)
} // namespace framework
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <paddle/framework/enforce.h>
// A satisfied enforce must be a no-op, regardless of the printf-style
// message arguments supplied alongside the condition.
TEST(ENFORCE, OK) {
  PADDLE_ENFORCE(true, "Enforce is ok %d now %f", 123, 0.345);
  const size_t limit = 10;
  size_t val = 1;
  PADDLE_ENFORCE(val < limit, "Enforce is OK too");
}
// A failed enforce must throw EnforceNotMet whose what() starts with the
// Sprintf-formatted message (the file/line suffix follows it).
TEST(ENFORCE, FAILED) {
  bool in_catch = false;
  try {
    PADDLE_ENFORCE(false, "Enforce is not ok %d at all", 123);
  } catch (const paddle::framework::EnforceNotMet& err) {
    // Catch by const reference: catching by value copies the exception and
    // would slice any derived type.
    in_catch = true;
    const std::string msg = "Enforce is not ok 123 at all";
    // Compare only the prefix; what() appends " at [file:line];".
    ASSERT_EQ(std::string(err.what()).compare(0, msg.length(), msg), 0);
  }
  ASSERT_TRUE(in_catch);
}
\ No newline at end of file
...@@ -91,7 +91,7 @@ TEST(OpRegistry, IllegalAttr) { ...@@ -91,7 +91,7 @@ TEST(OpRegistry, IllegalAttr) {
try { try {
paddle::framework::OperatorPtr op __attribute__((unused)) = paddle::framework::OperatorPtr op __attribute__((unused)) =
paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::OpRegistry::CreateOp(op_desc);
} catch (paddle::framework::EnforceNotMet err) { } catch (paddle::platform::EnforceNotMet err) {
caught = true; caught = true;
std::string msg = "larger_than check fail"; std::string msg = "larger_than check fail";
const char* err_msg = err.what(); const char* err_msg = err.what();
...@@ -138,7 +138,7 @@ TEST(OpRegistry, CustomChecker) { ...@@ -138,7 +138,7 @@ TEST(OpRegistry, CustomChecker) {
try { try {
paddle::framework::OperatorPtr op __attribute__((unused)) = paddle::framework::OperatorPtr op __attribute__((unused)) =
paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::OpRegistry::CreateOp(op_desc);
} catch (paddle::framework::EnforceNotMet err) { } catch (paddle::platform::EnforceNotMet err) {
caught = true; caught = true;
std::string msg = "Attribute 'test_attr' is required!"; std::string msg = "Attribute 'test_attr' is required!";
const char* err_msg = err.what(); const char* err_msg = err.what();
...@@ -157,7 +157,7 @@ TEST(OpRegistry, CustomChecker) { ...@@ -157,7 +157,7 @@ TEST(OpRegistry, CustomChecker) {
try { try {
paddle::framework::OperatorPtr op __attribute__((unused)) = paddle::framework::OperatorPtr op __attribute__((unused)) =
paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::OpRegistry::CreateOp(op_desc);
} catch (paddle::framework::EnforceNotMet err) { } catch (paddle::platform::EnforceNotMet err) {
caught = true; caught = true;
std::string msg = "'test_attr' must be even!"; std::string msg = "'test_attr' must be even!";
const char* err_msg = err.what(); const char* err_msg = err.what();
......
...@@ -18,7 +18,7 @@ limitations under the License. */ ...@@ -18,7 +18,7 @@ limitations under the License. */
#include <cstring> #include <cstring>
#include <memory> #include <memory>
#include "paddle/framework/ddim.h" #include "paddle/framework/ddim.h"
#include "paddle/framework/enforce.h" #include "paddle/platform/enforce.h"
#include "paddle/memory/memory.h" #include "paddle/memory/memory.h"
#include "paddle/platform/place.h" #include "paddle/platform/place.h"
......
...@@ -33,7 +33,7 @@ TEST(Tensor, DataAssert) { ...@@ -33,7 +33,7 @@ TEST(Tensor, DataAssert) {
bool caught = false; bool caught = false;
try { try {
src_tensor.data<double>(); src_tensor.data<double>();
} catch (paddle::framework::EnforceNotMet err) { } catch (paddle::platform::EnforceNotMet err) {
caught = true; caught = true;
std::string msg = std::string msg =
"Tenosr holds no memory. Call Tensor::mutable_data first."; "Tenosr holds no memory. Call Tensor::mutable_data first.";
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/memory/detail/system_allocator.h" #include "paddle/memory/detail/system_allocator.h"
#include "paddle/platform/assert.h" #include "paddle/platform/assert.h"
#include "paddle/platform/error.h" #include "paddle/platform/enforce.h"
#include "paddle/platform/gpu_info.h" #include "paddle/platform/gpu_info.h"
#include <stdlib.h> // for malloc and free #include <stdlib.h> // for malloc and free
...@@ -128,8 +128,7 @@ void GPUAllocator::Free(void* p, size_t size, size_t index) { ...@@ -128,8 +128,7 @@ void GPUAllocator::Free(void* p, size_t size, size_t index) {
// process is terminating, in which case we don't care if // process is terminating, in which case we don't care if
// cudaFree succeeds. // cudaFree succeeds.
if (err != cudaErrorCudartUnloading) { if (err != cudaErrorCudartUnloading) {
platform::throw_on_error(err, PADDLE_ENFORCE(err, "cudaFree{Host} failed in GPUAllocator::Free.");
"cudaFree{Host} failed in GPUAllocator::Free.");
} }
} }
......
...@@ -8,6 +8,8 @@ cc_test(place_test SRCS place_test.cc DEPS place glog gflags) ...@@ -8,6 +8,8 @@ cc_test(place_test SRCS place_test.cc DEPS place glog gflags)
add_subdirectory(dynload) add_subdirectory(dynload)
cc_test(enforce_test SRCS enforce_test.cc)
IF(WITH_GPU) IF(WITH_GPU)
set(GPU_CTX_DEPS dynload_cuda dynamic_loader) set(GPU_CTX_DEPS dynload_cuda dynamic_loader)
ELSE() ELSE()
......
...@@ -22,7 +22,6 @@ limitations under the License. */ ...@@ -22,7 +22,6 @@ limitations under the License. */
#endif #endif
#include "gflags/gflags.h" #include "gflags/gflags.h"
#include "paddle/platform/error.h"
DEFINE_double(fraction_of_cpu_memory_to_use, 1, DEFINE_double(fraction_of_cpu_memory_to_use, 1,
"Default use 100% of CPU memory for PaddlePaddle," "Default use 100% of CPU memory for PaddlePaddle,"
......
...@@ -11,7 +11,7 @@ limitations under the License. */ ...@@ -11,7 +11,7 @@ limitations under the License. */
#pragma once #pragma once
#include "paddle/framework/enforce.h" #include "paddle/platform/enforce.h"
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
#include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cublas.h"
#include "paddle/platform/dynload/cudnn.h" #include "paddle/platform/dynload/cudnn.h"
...@@ -74,8 +74,7 @@ class CUDADeviceContext : public DeviceContext { ...@@ -74,8 +74,7 @@ class CUDADeviceContext : public DeviceContext {
public: public:
explicit CUDADeviceContext(const GPUPlace gpu_place) : gpu_place_(gpu_place) { explicit CUDADeviceContext(const GPUPlace gpu_place) : gpu_place_(gpu_place) {
GPUPlaceGuard guard(gpu_place_); GPUPlaceGuard guard(gpu_place_);
paddle::platform::throw_on_error(cudaStreamCreate(&stream_), PADDLE_ENFORCE(cudaStreamCreate(&stream_), "cudaStreamCreate failed");
"cudaStreamCreate failed");
eigen_stream_.reset(new Eigen::CudaStreamDevice(&stream_)); eigen_stream_.reset(new Eigen::CudaStreamDevice(&stream_));
eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get())); eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get()));
} }
...@@ -86,8 +85,8 @@ class CUDADeviceContext : public DeviceContext { ...@@ -86,8 +85,8 @@ class CUDADeviceContext : public DeviceContext {
} }
void Wait() { void Wait() {
paddle::platform::throw_on_error(cudaStreamSynchronize(stream_), PADDLE_ENFORCE(cudaStreamSynchronize(stream_),
"cudaStreamSynchronize failed"); "cudaStreamSynchronize failed");
} }
cudaStream_t stream() { return stream_; } cudaStream_t stream() { return stream_; }
...@@ -97,12 +96,11 @@ class CUDADeviceContext : public DeviceContext { ...@@ -97,12 +96,11 @@ class CUDADeviceContext : public DeviceContext {
cublasHandle_t cublas_handle() { cublasHandle_t cublas_handle() {
if (!blas_handle_) { if (!blas_handle_) {
GPUPlaceGuard guard(gpu_place_); GPUPlaceGuard guard(gpu_place_);
PADDLE_ENFORCE(paddle::platform::dynload::cublasCreate(&blas_handle_) == PADDLE_ENFORCE(paddle::platform::dynload::cublasCreate(&blas_handle_),
CUBLAS_STATUS_SUCCESS,
"cublasCreate failed"); "cublasCreate failed");
PADDLE_ENFORCE(paddle::platform::dynload::cublasSetStream( PADDLE_ENFORCE(
blas_handle_, stream_) == CUBLAS_STATUS_SUCCESS, paddle::platform::dynload::cublasSetStream(blas_handle_, stream_),
"cublasSetStream failed"); "cublasSetStream failed");
} }
return blas_handle_; return blas_handle_;
} }
...@@ -110,12 +108,11 @@ class CUDADeviceContext : public DeviceContext { ...@@ -110,12 +108,11 @@ class CUDADeviceContext : public DeviceContext {
cudnnHandle_t cudnn_handle() { cudnnHandle_t cudnn_handle() {
if (!dnn_handle_) { if (!dnn_handle_) {
GPUPlaceGuard guard(gpu_place_); GPUPlaceGuard guard(gpu_place_);
PADDLE_ENFORCE(paddle::platform::dynload::cudnnCreate(&dnn_handle_) == PADDLE_ENFORCE(paddle::platform::dynload::cudnnCreate(&dnn_handle_),
CUDNN_STATUS_SUCCESS,
"cudnnCreate failed"); "cudnnCreate failed");
PADDLE_ENFORCE(paddle::platform::dynload::cudnnSetStream( PADDLE_ENFORCE(
dnn_handle_, stream_) == CUDNN_STATUS_SUCCESS, paddle::platform::dynload::cudnnSetStream(dnn_handle_, stream_),
"cudnnSetStream failed"); "cudnnSetStream failed");
} }
return dnn_handle_; return dnn_handle_;
} }
...@@ -124,16 +121,15 @@ class CUDADeviceContext : public DeviceContext { ...@@ -124,16 +121,15 @@ class CUDADeviceContext : public DeviceContext {
if (!rand_generator_) { if (!rand_generator_) {
GPUPlaceGuard guard(gpu_place_); GPUPlaceGuard guard(gpu_place_);
PADDLE_ENFORCE(paddle::platform::dynload::curandCreateGenerator( PADDLE_ENFORCE(paddle::platform::dynload::curandCreateGenerator(
&rand_generator_, CURAND_RNG_PSEUDO_DEFAULT) == &rand_generator_, CURAND_RNG_PSEUDO_DEFAULT),
CURAND_STATUS_SUCCESS,
"curandCreateGenerator failed"); "curandCreateGenerator failed");
PADDLE_ENFORCE( PADDLE_ENFORCE(
paddle::platform::dynload::curandSetPseudoRandomGeneratorSeed( paddle::platform::dynload::curandSetPseudoRandomGeneratorSeed(
rand_generator_, random_seed_) == CURAND_STATUS_SUCCESS, rand_generator_, random_seed_),
"curandSetPseudoRandomGeneratorSeed failed"); "curandSetPseudoRandomGeneratorSeed failed");
PADDLE_ENFORCE(paddle::platform::dynload::curandSetStream( PADDLE_ENFORCE(
rand_generator_, stream_) == CURAND_STATUS_SUCCESS, paddle::platform::dynload::curandSetStream(rand_generator_, stream_),
"curandSetStream failed"); "curandSetStream failed");
} }
return rand_generator_; return rand_generator_;
} }
...@@ -141,26 +137,23 @@ class CUDADeviceContext : public DeviceContext { ...@@ -141,26 +137,23 @@ class CUDADeviceContext : public DeviceContext {
~CUDADeviceContext() { ~CUDADeviceContext() {
Wait(); Wait();
if (blas_handle_) { if (blas_handle_) {
PADDLE_ENFORCE(paddle::platform::dynload::cublasDestroy(blas_handle_) == PADDLE_ENFORCE(paddle::platform::dynload::cublasDestroy(blas_handle_),
CUBLAS_STATUS_SUCCESS,
"cublasDestroy failed"); "cublasDestroy failed");
} }
if (dnn_handle_) { if (dnn_handle_) {
PADDLE_ENFORCE(paddle::platform::dynload::cudnnDestroy(dnn_handle_) == PADDLE_ENFORCE(paddle::platform::dynload::cudnnDestroy(dnn_handle_),
CUDNN_STATUS_SUCCESS,
"cudnnDestroy failed"); "cudnnDestroy failed");
} }
if (rand_generator_) { if (rand_generator_) {
PADDLE_ENFORCE(paddle::platform::dynload::curandDestroyGenerator( PADDLE_ENFORCE(
rand_generator_) == CURAND_STATUS_SUCCESS, paddle::platform::dynload::curandDestroyGenerator(rand_generator_),
"curandDestroyGenerator failed"); "curandDestroyGenerator failed");
} }
eigen_stream_.reset(); eigen_stream_.reset();
eigen_device_.reset(); eigen_device_.reset();
paddle::platform::throw_on_error(cudaStreamDestroy(stream_), PADDLE_ENFORCE(cudaStreamDestroy(stream_), "cudaStreamDestroy failed");
"cudaStreamDestroy failed");
} }
private: private:
......
...@@ -19,7 +19,7 @@ limitations under the License. */ ...@@ -19,7 +19,7 @@ limitations under the License. */
#include <string> #include <string>
#include "gflags/gflags.h" #include "gflags/gflags.h"
#include "glog/logging.h" #include "glog/logging.h"
#include "paddle/framework/enforce.h" #include "paddle/platform/enforce.h"
DEFINE_string(cudnn_dir, "", DEFINE_string(cudnn_dir, "",
"Specify path for loading libcudnn.so. For instance, " "Specify path for loading libcudnn.so. For instance, "
......
#pragma once
#include <sstream>
#include <stdexcept>
#include <string>
#ifndef PADDLE_ONLY_CPU
#include <cublas_v2.h>
#include <cudnn.h>
#include <curand.h>
#include <thrust/system/cuda/error.h>
#include <thrust/system_error.h>
#endif // PADDLE_ONLY_CPU
namespace paddle {
namespace platform {
#ifndef PADDLE_ONLY_CPU
// Throws a thrust::system_error carrying the CUDA error code and `message`
// when `e` is any non-zero cudaError_t; no-op on cudaSuccess (0).
inline void throw_on_error(cudaError_t e, const char* message) {
  if (e) {
    throw thrust::system_error(e, thrust::cuda_category(), message);
  }
}
// Translates a cuRAND failure into a thrust::system_error.
// NOTE(review): cuRAND statuses have no thrust error category, so the code is
// reported as the placeholder cudaErrorLaunchFailure — the original `stat`
// value is lost and only `message` distinguishes the failure. Confirm this
// loss of information is intentional.
inline void throw_on_error(curandStatus_t stat, const char* message) {
  if (stat != CURAND_STATUS_SUCCESS) {
    throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(),
                               message);
  }
}
/**
 * @brief Translate a cuDNN status into a std::runtime_error.
 *
 * No-op on CUDNN_STATUS_SUCCESS. The message is cuDNN's own error string
 * followed by the caller-supplied context. The stringstream is constructed
 * only on the failure path, so the common success case does no extra work
 * (the original built it unconditionally).
 */
inline void throw_on_error(cudnnStatus_t stat, const char* message) {
  if (stat == CUDNN_STATUS_SUCCESS) {
    return;
  }
  std::stringstream ss;
  ss << cudnnGetErrorString(stat) << ", " << message;
  throw std::runtime_error(ss.str());
}
/**
 * @brief Translate a cuBLAS status into a std::runtime_error.
 *
 * No-op on CUBLAS_STATUS_SUCCESS; otherwise throws with a human-readable
 * name for the status followed by ", <message>". Statuses missing from the
 * table are reported as "CUBLAS: unknown error" — the original silently
 * produced an empty prefix for them. The stringstream is constructed only
 * on the failure path.
 */
inline void throw_on_error(cublasStatus_t stat, const char* message) {
  if (stat == CUBLAS_STATUS_SUCCESS) {
    return;
  }
  std::stringstream ss;
  if (stat == CUBLAS_STATUS_NOT_INITIALIZED) {
    ss << "CUBLAS: not initialized";
  } else if (stat == CUBLAS_STATUS_ALLOC_FAILED) {
    ss << "CUBLAS: alloc failed";
  } else if (stat == CUBLAS_STATUS_INVALID_VALUE) {
    ss << "CUBLAS: invalid value";
  } else if (stat == CUBLAS_STATUS_ARCH_MISMATCH) {
    ss << "CUBLAS: arch mismatch";
  } else if (stat == CUBLAS_STATUS_MAPPING_ERROR) {
    ss << "CUBLAS: mapping error";
  } else if (stat == CUBLAS_STATUS_EXECUTION_FAILED) {
    ss << "CUBLAS: execution failed";
  } else if (stat == CUBLAS_STATUS_INTERNAL_ERROR) {
    ss << "CUBLAS: internal error";
  } else if (stat == CUBLAS_STATUS_NOT_SUPPORTED) {
    ss << "CUBLAS: not supported";
  } else if (stat == CUBLAS_STATUS_LICENSE_ERROR) {
    ss << "CUBLAS: license error";
  } else {
    // Robustness: never emit an empty status name for future/unlisted codes.
    ss << "CUBLAS: unknown error";
  }
  ss << ", " << message;
  throw std::runtime_error(ss.str());
}
/// Overload without a caller message: delegates to the two-argument form
/// with an empty message string.
inline void throw_on_error(cublasStatus_t stat) { throw_on_error(stat, ""); }
#endif // PADDLE_ONLY_CPU
/// Generic fallback for plain integer status codes: no-op when `stat` is
/// zero, otherwise throws std::runtime_error with the message formatted as
/// "<message>, stat = <stat>".
inline void throw_on_error(int stat, const char* message) {
  if (stat == 0) {
    return;
  }
  std::string full_msg(message);
  full_msg += ", stat = ";
  full_msg += std::to_string(stat);
  throw std::runtime_error(full_msg);
}
} // namespace platform
} // namespace paddle
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/platform/gpu_info.h" #include "paddle/platform/gpu_info.h"
#include "gflags/gflags.h" #include "gflags/gflags.h"
#include "paddle/platform/error.h" #include "paddle/platform/enforce.h"
DEFINE_double(fraction_of_gpu_memory_to_use, 0.95, DEFINE_double(fraction_of_gpu_memory_to_use, 0.95,
"Default use 95% of GPU memory for PaddlePaddle," "Default use 95% of GPU memory for PaddlePaddle,"
...@@ -25,7 +25,7 @@ namespace platform { ...@@ -25,7 +25,7 @@ namespace platform {
int GetDeviceCount() { int GetDeviceCount() {
int count; int count;
throw_on_error( PADDLE_ENFORCE(
cudaGetDeviceCount(&count), cudaGetDeviceCount(&count),
"cudaGetDeviceCount failed in paddle::platform::GetDeviceCount"); "cudaGetDeviceCount failed in paddle::platform::GetDeviceCount");
return count; return count;
...@@ -33,19 +33,19 @@ int GetDeviceCount() { ...@@ -33,19 +33,19 @@ int GetDeviceCount() {
int GetCurrentDeviceId() { int GetCurrentDeviceId() {
int device_id; int device_id;
throw_on_error( PADDLE_ENFORCE(
cudaGetDevice(&device_id), cudaGetDevice(&device_id),
"cudaGetDevice failed in paddle::platform::GetCurrentDeviceId"); "cudaGetDevice failed in paddle::platform::GetCurrentDeviceId");
return device_id; return device_id;
} }
void SetDeviceId(int id) { void SetDeviceId(int id) {
throw_on_error(cudaSetDevice(id), PADDLE_ENFORCE(cudaSetDevice(id),
"cudaSetDevice failed in paddle::platform::SetDeviceId"); "cudaSetDevice failed in paddle::platform::SetDeviceId");
} }
void GpuMemoryUsage(size_t& available, size_t& total) { void GpuMemoryUsage(size_t& available, size_t& total) {
throw_on_error(cudaMemGetInfo(&available, &total), PADDLE_ENFORCE(cudaMemGetInfo(&available, &total),
"cudaMemGetInfo failed in paddle::platform::GetMemoryUsage"); "cudaMemGetInfo failed in paddle::platform::GetMemoryUsage");
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册