Unverified commit bcddbc78, authored by Tao Luo, committed by GitHub

remove -Wmaybe-uninitialized warning (#19653)

* remove -Wmaybe-uninitialized warning

test=develop

* remove uninitialized op_handle_ in scale_loss_grad_op_handle.cc

test=develop
Parent commit: 2db40d9f
......@@ -155,7 +155,6 @@ set(COMMON_FLAGS
-Wno-error=terminate # Warning in PADDLE_ENFORCE
-Wno-error=int-in-bool-context # Warning in Eigen gcc 7.2
-Wimplicit-fallthrough=0 # Warning in tinyformat.h
-Wno-error=maybe-uninitialized # Warning in boost gcc 7.2
${fsanitize}
)
......
......@@ -38,13 +38,11 @@ struct ScaleLossGradFunctor {
float coeff_;
Tensor *out_;
platform::Place place_;
OpHandleBase *op_handle_;
proto::VarType::Type out_dtype_;
platform::DeviceContext *ctx_;
ScaleLossGradFunctor(float coeff, Tensor *out, platform::Place place,
OpHandleBase *op_handle, proto::VarType::Type dtype,
platform::DeviceContext *ctx)
proto::VarType::Type dtype, platform::DeviceContext *ctx)
: coeff_(coeff), out_(out), place_(place), out_dtype_(dtype), ctx_(ctx) {}
template <typename OutT>
......@@ -76,11 +74,11 @@ void ScaleLossGradOpHandle::RunImpl() {
tensor->Resize(make_ddim({1}));
#ifdef PADDLE_WITH_CUDA
ScaleLossGradFunctor func(coeff_, tensor, place_, this, out_dtype_,
ScaleLossGradFunctor func(coeff_, tensor, place_, out_dtype_,
this->dev_ctxes_.at(place_));
this->RunAndRecordEvent([&] { framework::VisitDataType(out_dtype_, func); });
#else
ScaleLossGradFunctor func(coeff_, tensor, place_, this, out_dtype_, nullptr);
ScaleLossGradFunctor func(coeff_, tensor, place_, out_dtype_, nullptr);
framework::VisitDataType(out_dtype_, func);
#endif
}
......
......@@ -215,7 +215,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
std::unordered_set<int64_t> s(ids_union.begin(), ids_union.end());
ids_union.assign(s.begin(), s.end());
for (int i; i < table_names.size(); i++) {
for (int i = 0; i < table_names.size(); i++) {
tables.push_back(std::make_pair(table_names[i], endpoints[i]));
}
......
......@@ -91,7 +91,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel<T> {
size_t seq_num = 0;
size_t batch_size;
size_t tag_num;
const int64_t* length_data;
const int64_t* length_data = nullptr;
framework::Vector<size_t> in_lod;
if (ctx.HasInput("length")) {
const Tensor* label_length = ctx.Input<framework::Tensor>("length");
......@@ -260,7 +260,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel<T> {
// getting seq_num using padding or not
size_t seq_num = 0;
framework::Vector<size_t> lod;
const int64_t* length_data;
const int64_t* length_data = nullptr;
if (ctx.HasInput("length")) {
const Tensor* label_length = ctx.Input<framework::Tensor>("length");
length_data = label_length->data<int64_t>();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Please finish editing this message first!
To comment, please register.