diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index 8b03601859ab05c857207076dbdbbed14b7a997c..cfd5e177d921e9b07371cbc3e36bcc849edb59b2 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -155,7 +155,6 @@ set(COMMON_FLAGS
     -Wno-error=terminate  # Warning in PADDLE_ENFORCE
     -Wno-error=int-in-bool-context  # Warning in Eigen gcc 7.2
     -Wimplicit-fallthrough=0  # Warning in tinyformat.h
-    -Wno-error=maybe-uninitialized  # Warning in boost gcc 7.2
     ${fsanitize}
 )
 
diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
index 7ab216095cade0bef3f188708dcee5d49f26c36f..517dd5ee264d96a11d8b54913a1e388edc95c034 100644
--- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
+++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
@@ -38,13 +38,11 @@ struct ScaleLossGradFunctor {
   float coeff_;
   Tensor *out_;
   platform::Place place_;
-  OpHandleBase *op_handle_;
   proto::VarType::Type out_dtype_;
   platform::DeviceContext *ctx_;
 
   ScaleLossGradFunctor(float coeff, Tensor *out, platform::Place place,
-                       OpHandleBase *op_handle, proto::VarType::Type dtype,
-                       platform::DeviceContext *ctx)
+                       proto::VarType::Type dtype, platform::DeviceContext *ctx)
       : coeff_(coeff), out_(out), place_(place), out_dtype_(dtype), ctx_(ctx) {}
 
   template <typename OutT>
@@ -76,11 +74,11 @@ void ScaleLossGradOpHandle::RunImpl() {
   tensor->Resize(make_ddim({1}));
 
 #ifdef PADDLE_WITH_CUDA
-  ScaleLossGradFunctor func(coeff_, tensor, place_, this, out_dtype_,
+  ScaleLossGradFunctor func(coeff_, tensor, place_, out_dtype_,
                             this->dev_ctxes_.at(place_));
   this->RunAndRecordEvent([&] { framework::VisitDataType(out_dtype_, func); });
 #else
-  ScaleLossGradFunctor func(coeff_, tensor, place_, this, out_dtype_, nullptr);
+  ScaleLossGradFunctor func(coeff_, tensor, place_, out_dtype_, nullptr);
   framework::VisitDataType(out_dtype_, func);
 #endif
 }
diff --git a/paddle/fluid/operators/distributed/parameter_prefetch.cc b/paddle/fluid/operators/distributed/parameter_prefetch.cc
index de2c37d8056457c4d973dadc1586cdd4710bee6c..c8b8561d673efca21e72aa31a64214bbe4afd96c 100644
--- a/paddle/fluid/operators/distributed/parameter_prefetch.cc
+++ b/paddle/fluid/operators/distributed/parameter_prefetch.cc
@@ -215,7 +215,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
   std::unordered_set<int64_t> s(ids_union.begin(), ids_union.end());
   ids_union.assign(s.begin(), s.end());
 
-  for (int i; i < table_names.size(); i++) {
+  for (int i = 0; i < table_names.size(); i++) {
     tables.push_back(std::make_pair(table_names[i], endpoints[i]));
   }
 
diff --git a/paddle/fluid/operators/linear_chain_crf_op.h b/paddle/fluid/operators/linear_chain_crf_op.h
index dab9aab7072492c7c8b1e2cafb79a93ab97be433..8cd3cdadc91ff302f17f4e0b8b8f104b7f57e0fa 100755
--- a/paddle/fluid/operators/linear_chain_crf_op.h
+++ b/paddle/fluid/operators/linear_chain_crf_op.h
@@ -91,7 +91,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
     size_t seq_num = 0;
     size_t batch_size;
     size_t tag_num;
-    const int64_t* length_data;
+    const int64_t* length_data = nullptr;
     framework::Vector<size_t> in_lod;
     if (ctx.HasInput("length")) {
       const Tensor* label_length = ctx.Input<framework::Tensor>("length");
@@ -260,7 +260,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel<T> {
     // getting seq_num using padding or not
     size_t seq_num = 0;
     framework::Vector<size_t> lod;
-    const int64_t* length_data;
+    const int64_t* length_data = nullptr;
     if (ctx.HasInput("length")) {
       const Tensor* label_length = ctx.Input<framework::Tensor>("length");
       length_data = label_length->data<int64_t>();