Unverified commit cdb3d279 authored by Zeng Jinle, committed by GitHub

Fix warnings of gcc 8 (#21205)

* fix warnings of gcc 8 compilation, test=develop

* fix boost::bad_get, test=develop

* refine PADDLE_ENFORCE, test=develop
Parent 5d821578
......@@ -170,6 +170,8 @@ if(NOT APPLE)
-Wno-error=nonnull-compare # Warning in boost gcc 8.2
-Wno-error=address # Warning in boost gcc 8.2
-Wno-ignored-qualifiers # Warning in boost gcc 8.2
+     -Wno-ignored-attributes # Warning in Eigen gcc 8.3
+     -Wno-parentheses # Warning in Eigen gcc 8.3
)
endif()
endif(NOT APPLE)
......
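The two new `-Wno-*` flags extend the existing allowlist of third-party warnings so that gcc 8.3's Eigen diagnostics do not fail a `-Werror` build. For comparison, a source-level alternative (not what this patch does) would be to silence the diagnostics only around the offending third-party include; a minimal sketch, assuming Eigen is on the include path:

```cpp
// Hypothetical alternative to the global -Wno-* flags: suppress the
// diagnostics only while the third-party header is being parsed.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wignored-attributes"
#pragma GCC diagnostic ignored "-Wparentheses"
#include <Eigen/Core>
#pragma GCC diagnostic pop
```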
......@@ -23,6 +23,8 @@
#include "paddle/fluid/framework/details/op_handle_base.h"
#include "paddle/fluid/framework/details/var_handle.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/platform/place.h"
......@@ -77,6 +79,27 @@ typedef std::vector<std::vector<std::pair<std::string, std::string>>>
GroupParamsAndGrads;
constexpr char kGroupParamsAndDenseGrads[] = "group_params_dense_grads";
+ inline bool IsOpRole(const OpDesc &op, OpRole role) {
+   const auto &attrs = op.GetAttrMap();
+   auto iter = attrs.find(OpProtoAndCheckerMaker::OpRoleAttrName());
+   if (iter == attrs.end()) return false;
+   return static_cast<bool>(boost::get<int>(iter->second) &
+                            static_cast<int>(role));
+ }
+
+ inline std::vector<std::string> GetOpRoleVarsOrEmpty(const OpDesc &op) {
+   const auto &attrs = op.GetAttrMap();
+   auto iter = attrs.find(OpProtoAndCheckerMaker::OpRoleVarAttrName());
+   if (iter == attrs.end()) return {};
+   auto &ret = boost::get<std::vector<std::string>>(iter->second);
+   PADDLE_ENFORCE_EQ(
+       ret.size() % 2, 0,
+       platform::errors::InvalidArgument(
+           "The size of attribute %s must be an even number, but got %d",
+           OpProtoAndCheckerMaker::OpRoleVarAttrName(), ret.size()));
+   return boost::get<std::vector<std::string>>(iter->second);
+ }
} // namespace details
} // namespace framework
} // namespace paddle
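These two helpers centralize the attribute lookups that the passes below previously open-coded with `boost::get` wrapped in `try`/`catch (boost::bad_get)`: a missing attribute now yields `false` or an empty vector instead of an exception. A minimal sketch of the intended call pattern, using only names that appear in this diff (`op_desc` stands in for some `OpDesc` instance):

```cpp
if (details::IsOpRole(op_desc, OpRole::kBackward)) {
  // Returns {} when op_role_var is absent; enforces an even size otherwise.
  auto backward_vars = details::GetOpRoleVarsOrEmpty(op_desc);
  for (size_t i = 0; i < backward_vars.size(); i += 2) {
    const std::string &param = backward_vars[i];
    const std::string &grad = backward_vars[i + 1];
    // ... consume the (param, grad) pair ...
  }
}
```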
......@@ -32,7 +32,7 @@ std::string OpHandleBase::DebugString() const {
return ss.str();
}
- OpHandleBase::~OpHandleBase() {
+ OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {
#ifdef PADDLE_WITH_CUDA
for (auto &ev : events_) {
if (ev.second) {
......
......@@ -46,7 +46,7 @@ class OpHandleBase {
node_->WrappedBy(this);
}
- virtual ~OpHandleBase();
+ virtual ~OpHandleBase() PADDLE_MAY_THROW;
std::string DebugString() const;
......
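The header changes together with the .cc file above because C++ requires a function's declaration and definition to carry matching exception specifications; annotating only one of them is ill-formed, which gcc 8 diagnoses. Schematically:

```cpp
struct Handle {
  ~Handle() noexcept(false);          // the declaration carries the spec...
};
Handle::~Handle() noexcept(false) {}  // ...and the definition must repeat it.
```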
......@@ -31,7 +31,7 @@ class GarbageCollector {
GarbageCollector(const platform::Place &place, size_t max_memory_size);
- virtual ~GarbageCollector() = default;
+ virtual ~GarbageCollector() PADDLE_MAY_THROW {}
virtual void Wait() const {}
......
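Note that the destructor also changes from `= default` to an empty body. Before C++20, an explicitly defaulted function may not have an exception specification different from the implicit one (which is `noexcept` for destructors), so adding `noexcept(false)` forces a user-provided body. A minimal sketch:

```cpp
struct Collector {
  // virtual ~Collector() noexcept(false) = default;  // ill-formed before C++20
  virtual ~Collector() noexcept(false) {}             // user-provided body is OK
};
```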
......@@ -432,26 +432,19 @@ class CoalesceGradTensorPass : public ir::Pass {
details::ParamsAndGrads *params_grads) const {
std::vector<ir::Node *> topo_nodes = ir::TopologySortOperations(graph);
for (auto &node : topo_nodes) {
-       try {
-         bool is_bk_op =
-             static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
-                 OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                               static_cast<int>(OpRole::kBackward));
-         if (!is_bk_op) continue;
-         // Currently, we assume that once gradient is generated, it can be
-         // broadcast, and each gradient is only broadcast once.
-         auto backward_vars =
-             boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
-                 OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-         PADDLE_ENFORCE_EQ(backward_vars.size() % 2, static_cast<size_t>(0));
-         for (size_t i = 0; i < backward_vars.size(); i += 2) {
-           VLOG(10) << "Trainable parameter: " << backward_vars[i]
-                    << ", gradient: " << backward_vars[i + 1];
-           params_grads->emplace_back(std::make_pair(
-               backward_vars[i] /*param*/, backward_vars[i + 1] /*grad*/));
-         }
-       } catch (boost::bad_get &e) {
+       auto &op_desc = *(node->Op());
+       bool is_bk_op = details::IsOpRole(op_desc, OpRole::kBackward);
+       if (!is_bk_op) continue;
+       // Currently, we assume that once gradient is generated, it can be
+       // broadcast, and each gradient is only broadcast once.
+       auto backward_vars = details::GetOpRoleVarsOrEmpty(op_desc);
+       for (size_t i = 0; i < backward_vars.size(); i += 2) {
+         VLOG(10) << "Trainable parameter: " << backward_vars[i]
+                  << ", gradient: " << backward_vars[i + 1];
+         params_grads->emplace_back(std::make_pair(
+             backward_vars[i] /*param*/, backward_vars[i + 1] /*grad*/));
+       }
}
}
......
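The removed block used exceptions for control flow: `boost::get` on a `boost::variant` that currently holds a different alternative throws `boost::bad_get`, which the old code treated as "attribute absent or of another type". A self-contained illustration of that mechanism:

```cpp
#include <boost/variant.hpp>
#include <string>
#include <vector>

int main() {
  boost::variant<int, std::vector<std::string>> attr = 1;  // holds an int
  try {
    auto &v = boost::get<std::vector<std::string>>(attr);  // wrong alternative
    (void)v;
  } catch (const boost::bad_get &) {
    // The old passes landed here and silently skipped the op; the new
    // helpers make the "attribute missing" case an explicit empty result.
  }
  return 0;
}
```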
......@@ -186,27 +186,18 @@ class AllReduceDepsPass : public ir::Pass {
graph.Get<const std::vector<OpDesc*>>(details::kStaleProgramOpDescs);
int order = 0;
for (auto* op_desc : ops) {
-     try {
-       bool is_bk_op =
-           static_cast<bool>(boost::get<int>(op_desc->GetAttr(
-               OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                             static_cast<int>(OpRole::kBackward));
-       if (!is_bk_op) continue;
-       auto backward_vars =
-           boost::get<std::vector<std::string>>(op_desc->GetNullableAttr(
-               OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-       if (backward_vars.empty()) continue;
-       PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
-       for (size_t i = 1; i < backward_vars.size(); i += 2) {
-         vars[order].emplace_back(backward_vars[i]);
-         VLOG(1) << "get parameter and gradient: " << backward_vars[i - 1]
-                 << ", " << backward_vars[i];
-       }
-       order++;
-     } catch (boost::bad_get e) {
+     bool is_bk_op = details::IsOpRole(*op_desc, OpRole::kBackward);
+     if (!is_bk_op) continue;
+     auto backward_vars = details::GetOpRoleVarsOrEmpty(*op_desc);
+     if (backward_vars.empty()) continue;
+     for (size_t i = 1; i < backward_vars.size(); i += 2) {
+       vars[order].emplace_back(backward_vars[i]);
+       VLOG(1) << "get parameter and gradient: " << backward_vars[i - 1]
+               << ", " << backward_vars[i];
+     }
+     order++;
}
return vars;
}
......
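The loop starts at index 1 with stride 2 because `op_role_var` interleaves names as parameter/gradient pairs (see the `fc_1.b_0` / `fc_1.b_0@GRAD` example later in this diff). A small self-contained sketch with illustrative names:

```cpp
#include <cstddef>
#include <string>
#include <vector>

int main() {
  // Layout of op_role_var: {param0, grad0, param1, grad1, ...}
  std::vector<std::string> backward_vars = {"fc_0.w_0", "fc_0.w_0@GRAD",
                                            "fc_0.b_0", "fc_0.b_0@GRAD"};
  for (std::size_t i = 1; i < backward_vars.size(); i += 2) {
    const std::string &param = backward_vars[i - 1];  // even indices
    const std::string &grad = backward_vars[i];       // odd indices
    (void)param;
    (void)grad;
  }
  return 0;
}
```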
......@@ -172,46 +172,32 @@ class BackWardOpDepsPass : public ir::Pass {
void GetBackWardOpHandles(
ir::Node* node, std::vector<details::OpHandleBase*>* backward_op_handles,
details::ParamsAndGrads* params_grads) const {
-   try {
-     bool is_bk_op =
-         static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
-             OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                           static_cast<int>(OpRole::kBackward));
-     if (!is_bk_op) return;
-     // Currently, we assume that once gradient is generated, it can be
-     // broadcast, and each gradient is only broadcast once.
-     auto backward_vars =
-         boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
-             OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-     PADDLE_ENFORCE_EQ(backward_vars.size() % 2, static_cast<size_t>(0));
-     PADDLE_ENFORCE(node->IsWrappedBy<details::OpHandleBase>());
-     backward_op_handles->emplace_back(
-         &node->Wrapper<details::OpHandleBase>());
-     for (size_t i = 0; i < backward_vars.size(); i += 2) {
-       VLOG(10) << "Trainable parameter: " << backward_vars[i]
-                << ", gradient: " << backward_vars[i + 1];
-       params_grads->emplace_back(std::make_pair(
-           backward_vars[i] /*param*/, backward_vars[i + 1] /*grad*/));
-     }
-   } catch (boost::bad_get e) {
+   auto& op_desc = *(node->Op());
+   bool is_bk_op = details::IsOpRole(op_desc, OpRole::kBackward);
+   if (!is_bk_op) return;
+   // Currently, we assume that once gradient is generated, it can be
+   // broadcast, and each gradient is only broadcast once.
+   auto backward_vars = details::GetOpRoleVarsOrEmpty(op_desc);
+   PADDLE_ENFORCE_EQ(node->IsWrappedBy<details::OpHandleBase>(), true,
+                     platform::errors::InvalidArgument(
+                         "Node must be wrapped by OpHandleBase"));
+   backward_op_handles->emplace_back(&node->Wrapper<details::OpHandleBase>());
+   for (size_t i = 0; i < backward_vars.size(); i += 2) {
+     VLOG(10) << "Trainable parameter: " << backward_vars[i]
+              << ", gradient: " << backward_vars[i + 1];
+     params_grads->emplace_back(std::make_pair(backward_vars[i] /*param*/,
+                                               backward_vars[i + 1] /*grad*/));
+   }
}
void GetOptimizerOpHandles(
ir::Node* node, std::vector<details::OpHandleBase*>* opt_handles) const {
-   try {
-     bool is_opt_op =
-         static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
-             OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                           static_cast<int>(OpRole::kOptimize));
-     if (!is_opt_op) return;
+   if (details::IsOpRole(*(node->Op()), OpRole::kOptimize)) {
      opt_handles->emplace_back(&node->Wrapper<details::OpHandleBase>());
-   } catch (boost::bad_get e) {
}
}
};
......
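This hunk also shows the "refine PADDLE_ENFORCE" part of the commit: the bare `PADDLE_ENFORCE(cond)` becomes `PADDLE_ENFORCE_EQ(cond, true, platform::errors::InvalidArgument(...))`, so a failure carries a typed, formatted message. A toy sketch of how such a check macro can be built (not Paddle's real definition):

```cpp
#include <sstream>
#include <stdexcept>

// Toy stand-in for PADDLE_ENFORCE_EQ: compare two values and, on failure,
// throw an exception whose message embeds the stringified operands.
#define TOY_ENFORCE_EQ(lhs, rhs, hint)                     \
  do {                                                     \
    if (!((lhs) == (rhs))) {                               \
      std::ostringstream oss;                              \
      oss << "Expected " #lhs " == " #rhs ": " << (hint);  \
      throw std::runtime_error(oss.str());                 \
    }                                                      \
  } while (0)

int main() {
  TOY_ENFORCE_EQ(4 % 2, 0, "size must be an even number");  // passes
  return 0;
}
```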
......@@ -206,43 +206,35 @@ void MultiDevSSAGraphBuilderBase::ApplyImpl(ir::Graph *graph) const {
// Insert collective ops if nranks > 1
if (!is_forwarding && Get<size_t>(details::kNRanks) > 1) {
-       try {
-         bool is_bk_op =
-             static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
-                 OpProtoAndCheckerMaker::OpRoleAttrName())) &
-                               static_cast<int>(OpRole::kBackward));
-         // optimize op is already processed in DealWithSpecialOp,
-         // here we only consider backward op
-         if (!is_bk_op) continue;
-         /*
-          * the op that will generate the gradient of one parameter will have
-            one attr op_role_var
-          * to record the parameter and gradient, like:
-            attrs {
-              name: "op_role_var"
-              type: STRINGS
-              strings: "fc_1.b_0"
-              strings: "fc_1.b_0@GRAD"
-            }
-          */
-         // Currently, we assume that once gradient is generated, it can be
-         // broadcast, and each gradient is only broadcast once.
-         auto backward_vars =
-             boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
-                 OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-         PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
-         for (size_t i = 0; i < backward_vars.size(); i += 2) {
-           auto &p_name = backward_vars[i];
-           auto &g_name = backward_vars[i + 1];
-           VLOG(10) << "Bcast " << g_name << " for parameter " << p_name
-                    << " op_type " << node->Op()->Type();
-           if (NeedCollectiveForGrad(g_name, sorted_ops)) {
-             InsertCollectiveOp(&result, p_name, g_name);
-           }
+       auto &op_desc = *(node->Op());
+       bool is_bk_op = details::IsOpRole(op_desc, OpRole::kBackward);
+       // optimize op is already processed in DealWithSpecialOp,
+       // here we only consider backward op
+       if (!is_bk_op) continue;
+       /*
+        * the op that will generate the gradient of one parameter will have
+          one attr op_role_var
+        * to record the parameter and gradient, like:
+          attrs {
+            name: "op_role_var"
+            type: STRINGS
+            strings: "fc_1.b_0"
+            strings: "fc_1.b_0@GRAD"
+          }
+        */
+       // Currently, we assume that once gradient is generated, it can be
+       // broadcast, and each gradient is only broadcast once.
+       auto backward_vars = details::GetOpRoleVarsOrEmpty(op_desc);
+       for (size_t i = 0; i < backward_vars.size(); i += 2) {
+         auto &p_name = backward_vars[i];
+         auto &g_name = backward_vars[i + 1];
+         VLOG(10) << "Bcast " << g_name << " for parameter " << p_name
+                  << " op_type " << node->Op()->Type();
+         if (NeedCollectiveForGrad(g_name, sorted_ops)) {
+           InsertCollectiveOp(&result, p_name, g_name);
+         }
-       } catch (boost::bad_get &e) {
}
}
}
......@@ -772,15 +764,7 @@ std::vector<ir::Node *> ReduceSSAGraphBuilder::SortForReduceMode(
if (!is_bk_op) continue;
// Currently, we assume that once gradient is generated, it can be
// broadcast, and each gradient is only broadcast once.
-     std::vector<std::string> backward_vars;
-     try {
-       backward_vars =
-           boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
-               OpProtoAndCheckerMaker::OpRoleVarAttrName()));
-     } catch (boost::bad_get &e) {
-     }
-     PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
+     auto backward_vars = details::GetOpRoleVarsOrEmpty(*(node->Op()));
for (size_t i = 0; i < backward_vars.size(); i += 2) {
auto &g_name = backward_vars[i + 1];
size_t cur_device_id = GetAppropriateDeviceID({g_name});
......
......@@ -429,7 +429,7 @@ TEST(IndicateVarDataTypeTest, lodtensor) {
bool caught = false;
try {
op->Run(scope, cpu_place);
-   } catch (paddle::platform::EnforceNotMet err) {
+   } catch (paddle::platform::EnforceNotMet& err) {
caught = true;
std::string ex_msg = err.what();
EXPECT_TRUE(
......@@ -457,7 +457,7 @@ TEST(IndicateVarDataTypeTest, selectedrows) {
bool caught = false;
try {
op->Run(scope, cpu_place);
-   } catch (paddle::platform::EnforceNotMet err) {
+   } catch (paddle::platform::EnforceNotMet& err) {
caught = true;
std::string ex_msg = err.what();
EXPECT_TRUE(
......@@ -484,7 +484,7 @@ TEST(IndicateVarDataTypeTest, other) {
bool caught = false;
try {
op->Run(scope, cpu_place);
-   } catch (paddle::platform::EnforceNotMet err) {
+   } catch (paddle::platform::EnforceNotMet& err) {
caught = true;
std::string ex_msg = err.what();
EXPECT_TRUE(ex_msg.find("The Input Variable(Other) of "
......@@ -580,7 +580,7 @@ void SetGetLoDLevelTestMain(std::string op_type) {
"kernel.";
try {
op->Run(scope, place);
-   } catch (paddle::platform::EnforceNotMet err) {
+   } catch (paddle::platform::EnforceNotMet& err) {
caught = true;
std::string ex_msg = err.what();
EXPECT_TRUE(ex_msg.find(err_str) != std::string::npos);
......
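The four test fixes above are all the same change: gcc 8's `-Wcatch-value` (enabled by `-Wall`) flags catching a polymorphic class type by value, which copies the exception object and can slice derived state. Catching by (const) reference is the idiomatic form:

```cpp
#include <cstdio>
#include <stdexcept>

int main() {
  try {
    throw std::runtime_error("boom");
  } catch (const std::exception &e) {  // by reference: no copy, no slicing
    std::printf("%s\n", e.what());
  }
  return 0;
}
```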
......@@ -67,7 +67,7 @@ class Variable {
private:
struct Placeholder {
- virtual ~Placeholder() = default;
+ virtual ~Placeholder() PADDLE_MAY_THROW {}
inline int Type() const { return type_; }
inline const void* Ptr() const { return ptr_; }
......
......@@ -39,7 +39,7 @@ class CublasHandleHolder {
#endif
}
- ~CublasHandleHolder() {
+ ~CublasHandleHolder() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cublasDestroy(handle_));
}
......
......@@ -226,7 +226,7 @@ class ScopedTensorDescriptor {
ScopedTensorDescriptor() {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateTensorDescriptor(&desc_));
}
- ~ScopedTensorDescriptor() {
+ ~ScopedTensorDescriptor() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyTensorDescriptor(desc_));
}
......@@ -287,7 +287,7 @@ class ScopedFilterDescriptor {
ScopedFilterDescriptor() {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateFilterDescriptor(&desc_));
}
- ~ScopedFilterDescriptor() {
+ ~ScopedFilterDescriptor() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyFilterDescriptor(desc_));
}
......@@ -329,7 +329,7 @@ class ScopedConvolutionDescriptor {
PADDLE_ENFORCE_CUDA_SUCCESS(
dynload::cudnnCreateConvolutionDescriptor(&desc_));
}
- ~ScopedConvolutionDescriptor() {
+ ~ScopedConvolutionDescriptor() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(
dynload::cudnnDestroyConvolutionDescriptor(desc_));
}
......@@ -377,7 +377,7 @@ class ScopedPoolingDescriptor {
ScopedPoolingDescriptor() {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreatePoolingDescriptor(&desc_));
}
- ~ScopedPoolingDescriptor() {
+ ~ScopedPoolingDescriptor() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyPoolingDescriptor(desc_));
}
......@@ -405,7 +405,7 @@ class ScopedSpatialTransformerDescriptor {
PADDLE_ENFORCE_CUDA_SUCCESS(
dynload::cudnnCreateSpatialTransformerDescriptor(&desc_));
}
- ~ScopedSpatialTransformerDescriptor() {
+ ~ScopedSpatialTransformerDescriptor() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(
dynload::cudnnDestroySpatialTransformerDescriptor(desc_));
}
......@@ -429,7 +429,7 @@ class ScopedActivationDescriptor {
PADDLE_ENFORCE_CUDA_SUCCESS(
dynload::cudnnCreateActivationDescriptor(&desc_));
}
- ~ScopedActivationDescriptor() {
+ ~ScopedActivationDescriptor() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(
dynload::cudnnDestroyActivationDescriptor(desc_));
}
......@@ -495,7 +495,7 @@ class ScopedCTCLossDescriptor {
ScopedCTCLossDescriptor() {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnCreateCTCLossDescriptor(&desc_));
}
- ~ScopedCTCLossDescriptor() {
+ ~ScopedCTCLossDescriptor() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnDestroyCTCLossDescriptor(desc_));
}
......
......@@ -46,7 +46,7 @@ namespace platform {
class DeviceContext {
public:
- virtual ~DeviceContext() {}
+ virtual ~DeviceContext() PADDLE_MAY_THROW {}
virtual Place GetPlace() const = 0;
virtual void Wait() const {}
......
......@@ -58,6 +58,10 @@ namespace platform {
/** HELPER MACROS AND FUNCTIONS **/
+ #ifndef PADDLE_MAY_THROW
+ #define PADDLE_MAY_THROW noexcept(false)
+ #endif
// Because most enforce conditions would evaluate to true, we can use
// __builtin_expect to instruct the C++ compiler to generate code that
// always forces branch prediction of true.
......
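`PADDLE_MAY_THROW` is the one new macro this commit introduces. Since C++11, destructors are implicitly `noexcept`, so a `PADDLE_ENFORCE_*` failure inside the destructors above would call `std::terminate()` instead of propagating; `noexcept(false)` restores the throwing behavior, and gcc 8 insists the intent be spelled out. A self-contained sketch with a hypothetical holder type:

```cpp
#include <stdexcept>

#ifndef PADDLE_MAY_THROW
#define PADDLE_MAY_THROW noexcept(false)
#endif

// Hypothetical RAII holder whose destructor runs a check that may throw,
// standing in for PADDLE_ENFORCE_CUDA_SUCCESS(...Destroy(handle_)).
struct HandleHolder {
  ~HandleHolder() PADDLE_MAY_THROW {
    if (!destroyed_ok_) throw std::runtime_error("failed to destroy handle");
  }
  bool destroyed_ok_ = true;
};

int main() {
  HandleHolder h;  // fine; would throw here if destroyed_ok_ were false
  return 0;
}
```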
......@@ -66,7 +66,7 @@ class NCCLGroupGuard {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupStart());
}
- inline ~NCCLGroupGuard() {
+ inline ~NCCLGroupGuard() PADDLE_MAY_THROW {
PADDLE_ENFORCE_CUDA_SUCCESS(dynload::ncclGroupEnd());
NCCLMutex().unlock();
}
......@@ -179,7 +179,7 @@ inline std::string GetHierarchicalInterNCCLVarName(size_t pos) {
class NCCLCommunicator {
public:
NCCLCommunicator() {}
- virtual ~NCCLCommunicator() {}
+ virtual ~NCCLCommunicator() PADDLE_MAY_THROW {}
NCCLContextMap *DefaultFlatCtx() const {
if (flat_ctxs_.size() == 0) {
......
......@@ -300,7 +300,7 @@ if (WITH_MKLDNN)
endif()
if (WITH_TESTING)
- set_property(TEST test_parallel_executor_mnist PROPERTY ENVIRONMENT GLOG_vmodule=scope_buffered_ssa_graph_executor=5)
+ set_property(TEST test_parallel_executor_mnist PROPERTY ENVIRONMENT GLOG_vmodule=all_reduce_deps_pass=10)
endif()
set_tests_properties(test_parallel_executor_test_while_train test_parallel_executor_mnist
......