Unverified commit cdb3d279, authored by Zeng Jinle, committed by GitHub

Fix warn of gcc8 (#21205)

* fix warnings of gcc 8 compilation, test=develop

* fix boost::bad_get, test=develop

* refine PADDLE_ENFORCE, test=develop
Parent 5d821578
@@ -170,6 +170,8 @@ if(NOT APPLE)
     -Wno-error=nonnull-compare # Warning in boost gcc 8.2
     -Wno-error=address # Warning in boost gcc 8.2
     -Wno-ignored-qualifiers # Warning in boost gcc 8.2
+    -Wno-ignored-attributes # Warning in Eigen gcc 8.3
+    -Wno-parentheses # Warning in Eigen gcc 8.3
     )
   endif()
 endif(NOT APPLE)
...
@@ -23,6 +23,8 @@
 #include "paddle/fluid/framework/details/op_handle_base.h"
 #include "paddle/fluid/framework/details/var_handle.h"
+#include "paddle/fluid/framework/op_desc.h"
+#include "paddle/fluid/framework/op_proto_maker.h"
 #include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/platform/place.h"
@@ -77,6 +79,27 @@ typedef std::vector<std::vector<std::pair<std::string, std::string>>>
     GroupParamsAndGrads;
 
 constexpr char kGroupParamsAndDenseGrads[] = "group_params_dense_grads";
 
+inline bool IsOpRole(const OpDesc &op, OpRole role) {
+  const auto &attrs = op.GetAttrMap();
+  auto iter = attrs.find(OpProtoAndCheckerMaker::OpRoleAttrName());
+  if (iter == attrs.end()) return false;
+  return static_cast<bool>(boost::get<int>(iter->second) &
+                           static_cast<int>(role));
+}
+
+inline std::vector<std::string> GetOpRoleVarsOrEmpty(const OpDesc &op) {
+  const auto &attrs = op.GetAttrMap();
+  auto iter = attrs.find(OpProtoAndCheckerMaker::OpRoleVarAttrName());
+  if (iter == attrs.end()) return {};
+  auto &ret = boost::get<std::vector<std::string>>(iter->second);
+  PADDLE_ENFORCE_EQ(
+      ret.size() % 2, 0,
+      platform::errors::InvalidArgument(
+          "The size of attribute %s must be an even number, but got %d",
+          OpProtoAndCheckerMaker::OpRoleVarAttrName(), ret.size()));
+  return boost::get<std::vector<std::string>>(iter->second);
+}
+
 }  // namespace details
 }  // namespace framework
 }  // namespace paddle
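The two helpers above replace the old pattern of fetching the op_role / op_role_var attributes via OpDesc::GetNullableAttr and silently swallowing the boost::bad_get thrown when an attribute is absent. A minimal usage sketch of how a graph pass queries them (illustration only, not part of the patch; CollectParamGradPairs is a hypothetical caller, and the snippet assumes the framework headers included above):

// Hypothetical caller, shown only to illustrate the new helpers.
void CollectParamGradPairs(const OpDesc &op,
                           std::vector<std::pair<std::string, std::string>> *out) {
  if (!details::IsOpRole(op, OpRole::kBackward)) return;  // plain attribute-map lookup, no exception
  auto vars = details::GetOpRoleVarsOrEmpty(op);          // empty vector when op_role_var is absent
  for (size_t i = 0; i < vars.size(); i += 2) {
    out->emplace_back(vars[i] /*param*/, vars[i + 1] /*grad*/);
  }
}

The call sites rewritten below (CoalesceGradTensorPass, AllReduceDepsPass, BackWardOpDepsPass, MultiDevSSAGraphBuilderBase) all follow this shape.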
@@ -32,7 +32,7 @@ std::string OpHandleBase::DebugString() const {
   return ss.str();
 }
 
-OpHandleBase::~OpHandleBase() {
+OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {
 #ifdef PADDLE_WITH_CUDA
   for (auto &ev : events_) {
     if (ev.second) {
...
@@ -46,7 +46,7 @@ class OpHandleBase {
     node_->WrappedBy(this);
   }
 
-  virtual ~OpHandleBase();
+  virtual ~OpHandleBase() PADDLE_MAY_THROW;
 
   std::string DebugString() const;
...
@@ -31,7 +31,7 @@ class GarbageCollector {
   GarbageCollector(const platform::Place &place, size_t max_memory_size);
 
-  virtual ~GarbageCollector() = default;
+  virtual ~GarbageCollector() PADDLE_MAY_THROW {}
 
   virtual void Wait() const {}
...
@@ -432,26 +432,19 @@ class CoalesceGradTensorPass : public ir::Pass {
       details::ParamsAndGrads *params_grads) const {
     std::vector<ir::Node *> topo_nodes = ir::TopologySortOperations(graph);
     for (auto &node : topo_nodes) {
-      try {
-        bool is_bk_op =
-            static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
-                                  OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                              static_cast<int>(OpRole::kBackward));
-        if (!is_bk_op) continue;
-        // Currently, we assume that once gradient is generated, it can be
-        // broadcast, and each gradient is only broadcast once.
-        auto backward_vars =
-            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
-                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-        PADDLE_ENFORCE_EQ(backward_vars.size() % 2, static_cast<size_t>(0));
-        for (size_t i = 0; i < backward_vars.size(); i += 2) {
-          VLOG(10) << "Trainable parameter: " << backward_vars[i]
-                   << ", gradient: " << backward_vars[i + 1];
-          params_grads->emplace_back(std::make_pair(
-              backward_vars[i] /*param*/, backward_vars[i + 1] /*grad*/));
-        }
-      } catch (boost::bad_get &e) {
+      auto &op_desc = *(node->Op());
+      bool is_bk_op = details::IsOpRole(op_desc, OpRole::kBackward);
+      if (!is_bk_op) continue;
+      // Currently, we assume that once gradient is generated, it can be
+      // broadcast, and each gradient is only broadcast once.
+      auto backward_vars = details::GetOpRoleVarsOrEmpty(op_desc);
+      for (size_t i = 0; i < backward_vars.size(); i += 2) {
+        VLOG(10) << "Trainable parameter: " << backward_vars[i]
+                 << ", gradient: " << backward_vars[i + 1];
+
+        params_grads->emplace_back(std::make_pair(
+            backward_vars[i] /*param*/, backward_vars[i + 1] /*grad*/));
       }
     }
   }
...
@@ -186,27 +186,18 @@ class AllReduceDepsPass : public ir::Pass {
         graph.Get<const std::vector<OpDesc*>>(details::kStaleProgramOpDescs);
     int order = 0;
     for (auto* op_desc : ops) {
-      try {
-        bool is_bk_op =
-            static_cast<bool>(boost::get<int>(op_desc->GetAttr(
-                                  OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                              static_cast<int>(OpRole::kBackward));
-        if (!is_bk_op) continue;
-
-        auto backward_vars =
-            boost::get<std::vector<std::string>>(op_desc->GetNullableAttr(
-                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-        if (backward_vars.empty()) continue;
-        PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
-        for (size_t i = 1; i < backward_vars.size(); i += 2) {
-          vars[order].emplace_back(backward_vars[i]);
-          VLOG(1) << "get parameter and gradient: " << backward_vars[i - 1]
-                  << ", " << backward_vars[i];
-        }
-        order++;
-      } catch (boost::bad_get e) {
+      bool is_bk_op = details::IsOpRole(*op_desc, OpRole::kBackward);
+      if (!is_bk_op) continue;
+
+      auto backward_vars = details::GetOpRoleVarsOrEmpty(*op_desc);
+      if (backward_vars.empty()) continue;
+
+      for (size_t i = 1; i < backward_vars.size(); i += 2) {
+        vars[order].emplace_back(backward_vars[i]);
+        VLOG(1) << "get parameter and gradient: " << backward_vars[i - 1]
+                << ", " << backward_vars[i];
       }
+      order++;
     }
     return vars;
   }
...
@@ -172,46 +172,32 @@ class BackWardOpDepsPass : public ir::Pass {
   void GetBackWardOpHandles(
       ir::Node* node, std::vector<details::OpHandleBase*>* backward_op_handles,
       details::ParamsAndGrads* params_grads) const {
-    try {
-      bool is_bk_op =
-          static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
-                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                            static_cast<int>(OpRole::kBackward));
-      if (!is_bk_op) return;
-
-      // Currently, we assume that once gradient is generated, it can be
-      // broadcast, and each gradient is only broadcast once.
-      auto backward_vars =
-          boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
-              OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-      PADDLE_ENFORCE_EQ(backward_vars.size() % 2, static_cast<size_t>(0));
-      PADDLE_ENFORCE(node->IsWrappedBy<details::OpHandleBase>());
-
-      backward_op_handles->emplace_back(
-          &node->Wrapper<details::OpHandleBase>());
-
-      for (size_t i = 0; i < backward_vars.size(); i += 2) {
-        VLOG(10) << "Trainable parameter: " << backward_vars[i]
-                 << ", gradient: " << backward_vars[i + 1];
-        params_grads->emplace_back(std::make_pair(
-            backward_vars[i] /*param*/, backward_vars[i + 1] /*grad*/));
-      }
-    } catch (boost::bad_get e) {
+    auto& op_desc = *(node->Op());
+    bool is_bk_op = details::IsOpRole(op_desc, OpRole::kBackward);
+    if (!is_bk_op) return;
+
+    // Currently, we assume that once gradient is generated, it can be
+    // broadcast, and each gradient is only broadcast once.
+    auto backward_vars = details::GetOpRoleVarsOrEmpty(op_desc);
+    PADDLE_ENFORCE_EQ(node->IsWrappedBy<details::OpHandleBase>(), true,
+                      platform::errors::InvalidArgument(
+                          "Node must be wrapped by OpHandleBase"));
+
+    backward_op_handles->emplace_back(&node->Wrapper<details::OpHandleBase>());
+
+    for (size_t i = 0; i < backward_vars.size(); i += 2) {
+      VLOG(10) << "Trainable parameter: " << backward_vars[i]
+               << ", gradient: " << backward_vars[i + 1];
+
+      params_grads->emplace_back(std::make_pair(backward_vars[i] /*param*/,
+                                                backward_vars[i + 1] /*grad*/));
     }
   }
 
   void GetOptimizerOpHandles(
       ir::Node* node, std::vector<details::OpHandleBase*>* opt_handles) const {
-    try {
-      bool is_opt_op =
-          static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
-                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                            static_cast<int>(OpRole::kOptimize));
-      if (!is_opt_op) return;
+    if (details::IsOpRole(*(node->Op()), OpRole::kOptimize)) {
       opt_handles->emplace_back(&node->Wrapper<details::OpHandleBase>());
-    } catch (boost::bad_get e) {
     }
   }
 };
...
@@ -206,43 +206,35 @@ void MultiDevSSAGraphBuilderBase::ApplyImpl(ir::Graph *graph) const {
       // Insert collective ops if nranks > 1
       if (!is_forwarding && Get<size_t>(details::kNRanks) > 1) {
-        try {
-          bool is_bk_op =
-              static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
-                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                                static_cast<int>(OpRole::kBackward));
-          // optimize op is already processed in DealWithSpecialOp,
-          // here we only consider backward op
-          if (!is_bk_op) continue;
-
-          /*
-           * the op that will generate the gradient of on parameter will have
-           one attr op_role_var
-           * to record the parameter and gradient, like:
-           attrs {
-             name: "op_role_var"
-             type: STRINGS
-             strings: "fc_1.b_0"
-             strings: "fc_1.b_0@GRAD"
-           }
-           */
-
-          // Currently, we assume that once gradient is generated, it can be
-          // broadcast, and each gradient is only broadcast once.
-          auto backward_vars =
-              boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
-                  OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-          PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
-          for (size_t i = 0; i < backward_vars.size(); i += 2) {
-            auto &p_name = backward_vars[i];
-            auto &g_name = backward_vars[i + 1];
-            VLOG(10) << "Bcast " << g_name << " for parameter " << p_name
-                     << " op_type " << node->Op()->Type();
-            if (NeedCollectiveForGrad(g_name, sorted_ops)) {
-              InsertCollectiveOp(&result, p_name, g_name);
-            }
-          }
-        } catch (boost::bad_get &e) {
+        auto &op_desc = *(node->Op());
+        bool is_bk_op = details::IsOpRole(op_desc, OpRole::kBackward);
+        // optimize op is already processed in DealWithSpecialOp,
+        // here we only consider backward op
+        if (!is_bk_op) continue;
+
+        /*
+         * the op that will generate the gradient of on parameter will have
+         one attr op_role_var
+         * to record the parameter and gradient, like:
+         attrs {
+           name: "op_role_var"
+           type: STRINGS
+           strings: "fc_1.b_0"
+           strings: "fc_1.b_0@GRAD"
+         }
+         */
+
+        // Currently, we assume that once gradient is generated, it can be
+        // broadcast, and each gradient is only broadcast once.
+        auto backward_vars = details::GetOpRoleVarsOrEmpty(op_desc);
+        for (size_t i = 0; i < backward_vars.size(); i += 2) {
+          auto &p_name = backward_vars[i];
+          auto &g_name = backward_vars[i + 1];
+          VLOG(10) << "Bcast " << g_name << " for parameter " << p_name
+                   << " op_type " << node->Op()->Type();
+          if (NeedCollectiveForGrad(g_name, sorted_ops)) {
+            InsertCollectiveOp(&result, p_name, g_name);
+          }
         }
       }
@@ -772,15 +764,7 @@ std::vector<ir::Node *> ReduceSSAGraphBuilder::SortForReduceMode(
       if (!is_bk_op) continue;
       // Currently, we assume that once gradient is generated, it can be
       // broadcast, and each gradient is only broadcast once.
-      std::vector<std::string> backward_vars;
-      try {
-        backward_vars =
-            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
-                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-      } catch (boost::bad_get &e) {
-      }
-      PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
+      auto backward_vars = details::GetOpRoleVarsOrEmpty(*(node->Op()));
       for (size_t i = 0; i < backward_vars.size(); i += 2) {
         auto &g_name = backward_vars[i + 1];
         size_t cur_device_id = GetAppropriateDeviceID({g_name});
...
@@ -429,7 +429,7 @@ TEST(IndicateVarDataTypeTest, lodtensor) {
   bool caught = false;
   try {
     op->Run(scope, cpu_place);
-  } catch (paddle::platform::EnforceNotMet err) {
+  } catch (paddle::platform::EnforceNotMet& err) {
     caught = true;
     std::string ex_msg = err.what();
     EXPECT_TRUE(
@@ -457,7 +457,7 @@ TEST(IndicateVarDataTypeTest, selectedrows) {
   bool caught = false;
   try {
     op->Run(scope, cpu_place);
-  } catch (paddle::platform::EnforceNotMet err) {
+  } catch (paddle::platform::EnforceNotMet& err) {
     caught = true;
     std::string ex_msg = err.what();
     EXPECT_TRUE(
@@ -484,7 +484,7 @@ TEST(IndicateVarDataTypeTest, other) {
   bool caught = false;
   try {
     op->Run(scope, cpu_place);
-  } catch (paddle::platform::EnforceNotMet err) {
+  } catch (paddle::platform::EnforceNotMet& err) {
     caught = true;
     std::string ex_msg = err.what();
     EXPECT_TRUE(ex_msg.find("The Input Variable(Other) of "
@@ -580,7 +580,7 @@ void SetGetLoDLevelTestMain(std::string op_type) {
       "kernel.";
   try {
     op->Run(scope, place);
-  } catch (paddle::platform::EnforceNotMet err) {
+  } catch (paddle::platform::EnforceNotMet& err) {
     caught = true;
     std::string ex_msg = err.what();
     EXPECT_TRUE(ex_msg.find(err_str) != std::string::npos);
...
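These four test hunks make the same one-line change: paddle::platform::EnforceNotMet is now caught by reference. Catching a polymorphic exception by value copies (and can slice) the thrown object, which gcc 8 reports via its -Wcatch-value warning. A minimal standalone sketch of the idiom with a standard exception type (illustration only, not Paddle code):

#include <iostream>
#include <stdexcept>

int main() {
  try {
    throw std::runtime_error("enforce failed");
  } catch (const std::exception &err) {  // catch by reference: no copy, no slicing
    std::cout << err.what() << std::endl;
  }
  return 0;
}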
@@ -67,7 +67,7 @@ class Variable {
  private:
   struct Placeholder {
-    virtual ~Placeholder() = default;
+    virtual ~Placeholder() PADDLE_MAY_THROW {}
 
     inline int Type() const { return type_; }
     inline const void* Ptr() const { return ptr_; }
...
@@ -39,7 +39,7 @@ class CublasHandleHolder {
 #endif
   }
 
-  ~CublasHandleHolder() {
+  ~CublasHandleHolder() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cublasDestroy(handle_));
   }
...
@@ -226,7 +226,7 @@ class ScopedTensorDescriptor {
   ScopedTensorDescriptor() {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateTensorDescriptor(&desc_));
   }
-  ~ScopedTensorDescriptor() {
+  ~ScopedTensorDescriptor() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyTensorDescriptor(desc_));
   }
@@ -287,7 +287,7 @@ class ScopedFilterDescriptor {
   ScopedFilterDescriptor() {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateFilterDescriptor(&desc_));
   }
-  ~ScopedFilterDescriptor() {
+  ~ScopedFilterDescriptor() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyFilterDescriptor(desc_));
   }
@@ -329,7 +329,7 @@ class ScopedConvolutionDescriptor {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         dynload::cudnnCreateConvolutionDescriptor(&desc_));
   }
-  ~ScopedConvolutionDescriptor() {
+  ~ScopedConvolutionDescriptor() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         dynload::cudnnDestroyConvolutionDescriptor(desc_));
   }
@@ -377,7 +377,7 @@ class ScopedPoolingDescriptor {
   ScopedPoolingDescriptor() {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreatePoolingDescriptor(&desc_));
   }
-  ~ScopedPoolingDescriptor() {
+  ~ScopedPoolingDescriptor() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyPoolingDescriptor(desc_));
   }
@@ -405,7 +405,7 @@ class ScopedSpatialTransformerDescriptor {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         dynload::cudnnCreateSpatialTransformerDescriptor(&desc_));
   }
-  ~ScopedSpatialTransformerDescriptor() {
+  ~ScopedSpatialTransformerDescriptor() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         dynload::cudnnDestroySpatialTransformerDescriptor(desc_));
   }
@@ -429,7 +429,7 @@ class ScopedActivationDescriptor {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         dynload::cudnnCreateActivationDescriptor(&desc_));
   }
-  ~ScopedActivationDescriptor() {
+  ~ScopedActivationDescriptor() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         dynload::cudnnDestroyActivationDescriptor(desc_));
   }
@@ -495,7 +495,7 @@ class ScopedCTCLossDescriptor {
   ScopedCTCLossDescriptor() {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateCTCLossDescriptor(&desc_));
   }
-  ~ScopedCTCLossDescriptor() {
+  ~ScopedCTCLossDescriptor() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyCTCLossDescriptor(desc_));
   }
...
@@ -46,7 +46,7 @@ namespace platform {
 class DeviceContext {
  public:
-  virtual ~DeviceContext() {}
+  virtual ~DeviceContext() PADDLE_MAY_THROW {}
   virtual Place GetPlace() const = 0;
 
   virtual void Wait() const {}
...
@@ -58,6 +58,10 @@ namespace platform {
 /** HELPER MACROS AND FUNCTIONS **/
 
+#ifndef PADDLE_MAY_THROW
+#define PADDLE_MAY_THROW noexcept(false)
+#endif
+
 // Because most enforce conditions would evaluate to true, we can use
 // __builtin_expect to instruct the C++ compiler to generate code that
 // always forces branch prediction of true.
...
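PADDLE_MAY_THROW expands to noexcept(false), as defined above. Since C++11, destructors are implicitly noexcept, so a PADDLE_ENFORCE-style check that throws inside one would call std::terminate (and newer gcc versions warn about such throws, e.g. via -Wterminate); declaring the destructor noexcept(false) lets the exception propagate to a surrounding try/catch instead. A minimal standalone sketch (illustration only, not Paddle code; MAY_THROW stands in for PADDLE_MAY_THROW):

#include <iostream>
#include <stdexcept>

#define MAY_THROW noexcept(false)  // stand-in for PADDLE_MAY_THROW

struct Resource {
  ~Resource() MAY_THROW {
    // Imagine a failed PADDLE_ENFORCE-style check on a cleanup call here.
    throw std::runtime_error("cleanup failed");
  }
};

int main() {
  try {
    Resource r;  // destroyed at the end of this block; its destructor throws
  } catch (const std::exception &err) {
    // Reachable only because the destructor is declared noexcept(false).
    std::cout << "caught: " << err.what() << std::endl;
  }
  return 0;
}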
@@ -66,7 +66,7 @@ class NCCLGroupGuard {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupStart());
   }
 
-  inline ~NCCLGroupGuard() {
+  inline ~NCCLGroupGuard() PADDLE_MAY_THROW {
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupEnd());
     NCCLMutex().unlock();
   }
@@ -179,7 +179,7 @@ inline std::string GetHierarchicalInterNCCLVarName(size_t pos) {
 class NCCLCommunicator {
  public:
   NCCLCommunicator() {}
-  virtual ~NCCLCommunicator() {}
+  virtual ~NCCLCommunicator() PADDLE_MAY_THROW {}
 
   NCCLContextMap *DefaultFlatCtx() const {
     if (flat_ctxs_.size() == 0) {
...
@@ -300,7 +300,7 @@ if (WITH_MKLDNN)
 endif()
 
 if (WITH_TESTING)
-  set_property(TEST test_parallel_executor_mnist PROPERTY ENVIRONMENT GLOG_vmodule=scope_buffered_ssa_graph_executor=5)
+  set_property(TEST test_parallel_executor_mnist PROPERTY ENVIRONMENT GLOG_vmodule=all_reduce_deps_pass=10)
 endif()
 
 set_tests_properties(test_parallel_executor_test_while_train test_parallel_executor_mnist
...