Unverified commit bcddbc78, authored by: T Tao Luo, committed by: GitHub

remove -Wmaybe-uninitialized warning (#19653)

* remove -Wmaybe-uninitialized warning

test=develop

* remove uninitialized op_handle_ in scale_loss_grad_op_handle.cc

test=develop
Parent 2db40d9f
...@@ -155,7 +155,6 @@ set(COMMON_FLAGS ...@@ -155,7 +155,6 @@ set(COMMON_FLAGS
-Wno-error=terminate # Warning in PADDLE_ENFORCE -Wno-error=terminate # Warning in PADDLE_ENFORCE
-Wno-error=int-in-bool-context # Warning in Eigen gcc 7.2 -Wno-error=int-in-bool-context # Warning in Eigen gcc 7.2
-Wimplicit-fallthrough=0 # Warning in tinyformat.h -Wimplicit-fallthrough=0 # Warning in tinyformat.h
-Wno-error=maybe-uninitialized # Warning in boost gcc 7.2
${fsanitize} ${fsanitize}
) )
......
...@@ -38,13 +38,11 @@ struct ScaleLossGradFunctor { ...@@ -38,13 +38,11 @@ struct ScaleLossGradFunctor {
float coeff_; float coeff_;
Tensor *out_; Tensor *out_;
platform::Place place_; platform::Place place_;
OpHandleBase *op_handle_;
proto::VarType::Type out_dtype_; proto::VarType::Type out_dtype_;
platform::DeviceContext *ctx_; platform::DeviceContext *ctx_;
ScaleLossGradFunctor(float coeff, Tensor *out, platform::Place place, ScaleLossGradFunctor(float coeff, Tensor *out, platform::Place place,
OpHandleBase *op_handle, proto::VarType::Type dtype, proto::VarType::Type dtype, platform::DeviceContext *ctx)
platform::DeviceContext *ctx)
: coeff_(coeff), out_(out), place_(place), out_dtype_(dtype), ctx_(ctx) {} : coeff_(coeff), out_(out), place_(place), out_dtype_(dtype), ctx_(ctx) {}
template <typename OutT> template <typename OutT>
...@@ -76,11 +74,11 @@ void ScaleLossGradOpHandle::RunImpl() { ...@@ -76,11 +74,11 @@ void ScaleLossGradOpHandle::RunImpl() {
tensor->Resize(make_ddim({1})); tensor->Resize(make_ddim({1}));
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
ScaleLossGradFunctor func(coeff_, tensor, place_, this, out_dtype_, ScaleLossGradFunctor func(coeff_, tensor, place_, out_dtype_,
this->dev_ctxes_.at(place_)); this->dev_ctxes_.at(place_));
this->RunAndRecordEvent([&] { framework::VisitDataType(out_dtype_, func); }); this->RunAndRecordEvent([&] { framework::VisitDataType(out_dtype_, func); });
#else #else
ScaleLossGradFunctor func(coeff_, tensor, place_, this, out_dtype_, nullptr); ScaleLossGradFunctor func(coeff_, tensor, place_, out_dtype_, nullptr);
framework::VisitDataType(out_dtype_, func); framework::VisitDataType(out_dtype_, func);
#endif #endif
} }
......
...@@ -215,7 +215,7 @@ void prefetchs(const std::vector<std::string>& id_var_names, ...@@ -215,7 +215,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
std::unordered_set<int64_t> s(ids_union.begin(), ids_union.end()); std::unordered_set<int64_t> s(ids_union.begin(), ids_union.end());
ids_union.assign(s.begin(), s.end()); ids_union.assign(s.begin(), s.end());
for (int i; i < table_names.size(); i++) { for (int i = 0; i < table_names.size(); i++) {
tables.push_back(std::make_pair(table_names[i], endpoints[i])); tables.push_back(std::make_pair(table_names[i], endpoints[i]));
} }
......
...@@ -91,7 +91,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> { ...@@ -91,7 +91,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
size_t seq_num = 0; size_t seq_num = 0;
size_t batch_size; size_t batch_size;
size_t tag_num; size_t tag_num;
const int64_t* length_data; const int64_t* length_data = nullptr;
framework::Vector<size_t> in_lod; framework::Vector<size_t> in_lod;
if (ctx.HasInput("length")) { if (ctx.HasInput("length")) {
const Tensor* label_length = ctx.Input<framework::Tensor>("length"); const Tensor* label_length = ctx.Input<framework::Tensor>("length");
...@@ -260,7 +260,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel<T> { ...@@ -260,7 +260,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel<T> {
// getting seq_num using padding or not // getting seq_num using padding or not
size_t seq_num = 0; size_t seq_num = 0;
framework::Vector<size_t> lod; framework::Vector<size_t> lod;
const int64_t* length_data; const int64_t* length_data = nullptr;
if (ctx.HasInput("length")) { if (ctx.HasInput("length")) {
const Tensor* label_length = ctx.Input<framework::Tensor>("length"); const Tensor* label_length = ctx.Input<framework::Tensor>("length");
length_data = label_length->data<int64_t>(); length_data = label_length->data<int64_t>();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register