Unverified commit c0656dcb authored by Tao Luo, committed by GitHub

remove -Wno-error=sign-compare, make warning as error (#21358)

* remove -Wno-error=sign-compare, make warning as error

test=develop test=document_fix

* fix exist compile warning

test=develop
Parent 1840c165
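
For context: dropping `-Wno-error=sign-compare` from `COMMON_FLAGS` and `GPU_COMMON_FLAGS` means every signed/unsigned comparison now fails the build, which is why each hunk below either flips a loop index type or inserts a `static_cast`. A minimal sketch of the offending pattern and its fix (illustrative only, not code from this commit):

```cpp
#include <cstddef>
#include <vector>

// Before: v.size() returns std::size_t (unsigned). Comparing it against a
// signed int triggers -Wsign-compare ("comparison of integer expressions of
// different signedness"), which this commit turns into a hard error.
int count_positive_bad(const std::vector<int>& v) {
  int n = 0;
  for (int i = 0; i < v.size(); ++i) {  // error once the suppression is gone
    if (v[i] > 0) ++n;
  }
  return n;
}

// After: match the index type to the bound, or static_cast one side when the
// types cannot be changed -- the two fixes used throughout this diff.
int count_positive(const std::vector<int>& v) {
  int n = 0;
  for (std::size_t i = 0; i < v.size(); ++i) {
    if (v[i] > 0) ++n;
  }
  return n;
}
```

Note that the fix runs in both directions: protobuf accessors such as `slot.shape_size()` return a signed `int`, so those loops switch from `size_t` to `int`, while loops bounded by STL `.size()` switch from `int` to `size_t`.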
@@ -148,7 +148,6 @@ set(COMMON_FLAGS
-Wno-unused-parameter
-Wno-unused-function
-Wno-error=literal-suffix
- -Wno-error=sign-compare
-Wno-error=unused-local-typedefs
-Wno-error=parentheses-equality # Warnings in pybind11
-Wno-error=ignored-attributes # Warnings in Eigen, gcc 6.3
@@ -183,7 +182,6 @@ set(GPU_COMMON_FLAGS
-Wdelete-non-virtual-dtor
-Wno-unused-parameter
-Wno-unused-function
- -Wno-error=sign-compare
-Wno-error=literal-suffix
-Wno-error=unused-local-typedefs
-Wno-error=unused-function # Warnings in Numpy Header.
......
@@ -391,7 +391,7 @@ void MultiSlotDataFeed::Init(
use_slots_is_dense_.push_back(slot.is_dense());
std::vector<int> local_shape;
if (slot.is_dense()) {
- for (size_t j = 0; j < slot.shape_size(); ++j) {
+ for (int j = 0; j < slot.shape_size(); ++j) {
if (slot.shape(j) > 0) {
total_dims_without_inductive_[i] *= slot.shape(j);
}
@@ -400,7 +400,7 @@ void MultiSlotDataFeed::Init(
}
}
}
- for (size_t j = 0; j < slot.shape_size(); ++j) {
+ for (int j = 0; j < slot.shape_size(); ++j) {
local_shape.push_back(slot.shape(j));
}
use_slots_shape_.push_back(local_shape);
@@ -736,7 +736,7 @@ void MultiSlotInMemoryDataFeed::Init(
use_slots_is_dense_.push_back(slot.is_dense());
std::vector<int> local_shape;
if (slot.is_dense()) {
- for (size_t j = 0; j < slot.shape_size(); ++j) {
+ for (int j = 0; j < slot.shape_size(); ++j) {
if (slot.shape(j) > 0) {
total_dims_without_inductive_[i] *= slot.shape(j);
}
@@ -745,7 +745,7 @@ void MultiSlotInMemoryDataFeed::Init(
}
}
}
- for (size_t j = 0; j < slot.shape_size(); ++j) {
+ for (int j = 0; j < slot.shape_size(); ++j) {
local_shape.push_back(slot.shape(j));
}
use_slots_shape_.push_back(local_shape);
......
@@ -638,7 +638,7 @@ void MultiSlotDataset::MergeByInsId() {
}
auto multi_slot_desc = data_feed_desc_.multi_slot_desc();
std::vector<std::string> use_slots;
- for (size_t i = 0; i < multi_slot_desc.slots_size(); ++i) {
+ for (int i = 0; i < multi_slot_desc.slots_size(); ++i) {
const auto& slot = multi_slot_desc.slots(i);
if (slot.is_used()) {
use_slots.push_back(slot.name());
@@ -828,7 +828,7 @@ void MultiSlotDataset::SlotsShuffle(
timeline.Start();
auto multi_slot_desc = data_feed_desc_.multi_slot_desc();
std::set<uint16_t> index_slots;
- for (size_t i = 0; i < multi_slot_desc.slots_size(); ++i) {
+ for (int i = 0; i < multi_slot_desc.slots_size(); ++i) {
std::string cur_slot = multi_slot_desc.slots(i).name();
if (slots_to_replace.find(cur_slot) != slots_to_replace.end()) {
index_slots.insert(i);
......
@@ -215,7 +215,7 @@ class DatasetImpl : public Dataset {
bool merge_by_insid_;
bool parse_ins_id_;
bool parse_content_;
- int merge_size_;
+ size_t merge_size_;
bool slots_shuffle_fea_eval_ = false;
int preload_thread_num_;
std::mutex global_index_mutex_;
......
@@ -152,7 +152,7 @@ void DistMultiTrainer::Finalize() {
for (auto &th : threads_) {
th.join();
}
- for (int i = 0; i < need_merge_var_names_.size(); i++) {
+ for (size_t i = 0; i < need_merge_var_names_.size(); i++) {
Variable *root_var = root_scope_->FindVar(need_merge_var_names_[i]);
if (root_var == nullptr) {
continue;
......
@@ -180,7 +180,7 @@ std::pair<int64_t, int64_t> GetTensorBound(LoDTensor* tensor, int index) {
}
}
- bool CheckValidOutput(LoDTensor* tensor, int batch_size) {
+ bool CheckValidOutput(LoDTensor* tensor, size_t batch_size) {
auto& dims = tensor->dims();
if (dims.size() != 2) return false;
if (tensor->lod().size() != 0) {
@@ -189,7 +189,7 @@ bool CheckValidOutput(LoDTensor* tensor, int batch_size) {
return false;
}
} else {
- if (dims[0] != batch_size) {
+ if (dims[0] != static_cast<int>(batch_size)) {
return false;
}
}
@@ -329,7 +329,8 @@ void DownpourWorker::FillSparseValue(size_t table_idx) {
}
memcpy(ptr + table.emb_dim() * index, fea_value[fea_idx].data(),
sizeof(float) * table.emb_dim());
- if (is_nid && index == tensor->lod()[0][nid_ins_index]) {
+ if (is_nid &&
+     static_cast<size_t>(index) == tensor->lod()[0][nid_ins_index]) {
nid_show_.push_back(fea_value[fea_idx][0]);
++nid_ins_index;
}
@@ -346,7 +347,8 @@ void DownpourWorker::FillSparseValue(size_t table_idx) {
}
memcpy(ptr + table.emb_dim() * index, fea_value[fea_idx].data() + 2,
sizeof(float) * table.emb_dim());
- if (is_nid && index == tensor->lod()[0][nid_ins_index]) {
+ if (is_nid &&
+     static_cast<size_t>(index) == tensor->lod()[0][nid_ins_index]) {
nid_show_.push_back(fea_value[fea_idx][0]);
++nid_ins_index;
}
@@ -402,7 +404,7 @@ void DownpourWorker::AdjustInsWeight() {
int64_t nid_adjw_num = 0;
double nid_adjw_weight = 0.0;
size_t ins_index = 0;
- for (int i = 0; i < len; ++i) {
+ for (size_t i = 0; i < len; ++i) {
float nid_show = nid_show_[i];
VLOG(3) << "nid_show " << nid_show;
if (nid_show < 0) {
@@ -970,7 +972,7 @@ void DownpourWorker::TrainFiles() {
}
}
if (need_dump_field_) {
- int batch_size = device_reader_->GetCurBatchSize();
+ size_t batch_size = device_reader_->GetCurBatchSize();
std::vector<std::string> ars(batch_size);
for (auto& ar : ars) {
ar.clear();
@@ -990,7 +992,7 @@ void DownpourWorker::TrainFiles() {
if (!CheckValidOutput(tensor, batch_size)) {
continue;
}
- for (int i = 0; i < batch_size; ++i) {
+ for (size_t i = 0; i < batch_size; ++i) {
auto output_dim = tensor->dims()[1];
std::string output_dimstr =
boost::lexical_cast<std::string>(output_dim);
......
@@ -32,8 +32,8 @@ void PullDenseWorker::Initialize(const TrainerDesc& param) {
threshold_ = param_.threshold();
thread_num_ = param_.device_num();
sleep_time_ms_ = param_.sleep_time_ms();
- for (size_t i = 0;
-      i < dwp_param_.program_config(0).pull_dense_table_id_size(); ++i) {
+ for (int i = 0; i < dwp_param_.program_config(0).pull_dense_table_id_size();
+      ++i) {
uint64_t tid = static_cast<uint64_t>(
dwp_param_.program_config(0).pull_dense_table_id(i));
TableParameter table;
@@ -67,7 +67,7 @@ void PullDenseWorker::Wait(std::vector<::std::future<int32_t>>* status_vec) {
}
}
- int MAX_FAIL_NUM = 20;
+ size_t MAX_FAIL_NUM = 20;
if (pull_dense_fail_times_ > MAX_FAIL_NUM) {
LOG(FATAL) << "Pull Dense Failed Times More Than " << MAX_FAIL_NUM
<< " Times";
@@ -85,8 +85,8 @@ void PullDenseWorker::Stop() {
void PullDenseWorker::PullDense(bool force_update) {
pull_dense_status_.resize(0);
- for (size_t i = 0;
-      i < dwp_param_.program_config(0).pull_dense_table_id_size(); ++i) {
+ for (int i = 0; i < dwp_param_.program_config(0).pull_dense_table_id_size();
+      ++i) {
uint64_t tid = static_cast<uint64_t>(
dwp_param_.program_config(0).pull_dense_table_id(i));
if (force_update || CheckUpdateParam(tid)) {
@@ -127,7 +127,8 @@ bool PullDenseWorker::CheckUpdateParam(uint64_t table_id) {
auto& version = training_versions_[table_id];
current_version_[table_id] =
*(std::min_element(version.begin(), version.end()));
- if (current_version_[table_id] - last_versions_[table_id] < threshold_) {
+ if (current_version_[table_id] - last_versions_[table_id] <
+     static_cast<size_t>(threshold_)) {
return false;
}
return true;
......
@@ -69,7 +69,7 @@ TEST(test_tracer, test_trace_op) {
mul_attr_map["use_mkldnn"] = false;
tracer.TraceOp("mul", ins, outs, mul_attr_map, place, true);
const auto& out_tensor = vout->Var().Get<framework::LoDTensor>();
- for (size_t i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
+ for (int i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
ASSERT_EQ(out_tensor.data<float>()[i], 20.0);
}
}
@@ -108,7 +108,7 @@ TEST(test_tracer, test_trace_op_with_backward) {
mul_attr_map["use_mkldnn"] = false;
tracer.TraceOp("mul", ins, outs, mul_attr_map, place, true);
const auto& out_tensor = vout->Var().Get<framework::LoDTensor>();
- for (size_t i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
+ for (int i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
ASSERT_EQ(out_tensor.data<float>()[i], 20.0);
}
}
@@ -239,14 +239,14 @@ TEST(test_tracer, test_trace_op_with_multi_device_inputs) {
framework::LoDTensor rlt;
framework::TensorCopySync(vout->Var().Get<framework::LoDTensor>(), place,
&rlt);
- for (size_t i = 0; i < rlt.numel(); i++) {
+ for (int i = 0; i < rlt.numel(); i++) {
ASSERT_EQ(rlt.data<float>()[i], 4.0);
}
framework::LoDTensor out_grad;
framework::TensorCopySync(vout->GradVar().Get<framework::LoDTensor>(), place,
&out_grad);
- for (size_t i = 0; i < out_grad.numel(); ++i) {
+ for (int i = 0; i < out_grad.numel(); ++i) {
ASSERT_EQ(out_grad.data<float>()[i], 1.0);
}
@@ -254,7 +254,7 @@ TEST(test_tracer, test_trace_op_with_multi_device_inputs) {
framework::TensorCopySync(x_in->GradVar().Get<framework::LoDTensor>(), place,
&x_grad);
- for (size_t i = 0; i < x_grad.numel(); ++i) {
+ for (int i = 0; i < x_grad.numel(); ++i) {
ASSERT_EQ(x_grad.data<float>()[i], 1.0);
}
@@ -262,7 +262,7 @@ TEST(test_tracer, test_trace_op_with_multi_device_inputs) {
framework::TensorCopySync(y_in->GradVar().Get<framework::LoDTensor>(), place,
&y_grad);
- for (size_t i = 0; i < y_grad.numel(); ++i) {
+ for (int i = 0; i < y_grad.numel(); ++i) {
ASSERT_EQ(y_grad.data<float>()[i], 1.0);
}
}
......
@@ -26,7 +26,7 @@ namespace paddle {
namespace operators {
static inline framework::DDim ComputeAndCheckShape(
const bool is_runtime, const std::vector<framework::DDim>& inputs_dims,
- const int axis) {
+ const size_t axis) {
const size_t n = inputs_dims.size();
auto out_dims = inputs_dims[0];
size_t in_zero_dims_size = out_dims.size();
......
@@ -114,7 +114,7 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
if (!channel_last) {
output_shape.push_back(filter_dims[0]);
}
- for (size_t i = 0; i < in_data_dims.size(); ++i) {
+ for (int i = 0; i < in_data_dims.size(); ++i) {
if ((!ctx->IsRuntime()) &&
(in_data_dims[i] <= 0 || filter_dims[i + 2] <= 0)) {
output_shape.push_back(-1);
......
@@ -72,8 +72,8 @@ inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
const std::vector<int>& ksize) {
// set padding size == data_dims.size() * 2
auto data_shape = framework::vectorize<int>(data_dims);
- if (paddings->size() == data_dims.size()) {
-   for (size_t i = 0; i < data_dims.size(); ++i) {
+ if (static_cast<int>(paddings->size()) == data_dims.size()) {
+   for (int i = 0; i < data_dims.size(); ++i) {
int copy_pad = *(paddings->begin() + 2 * i);
paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
}
@@ -85,7 +85,7 @@ inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
// when padding_algorithm is "VALID" or "SAME"
if (padding_algorithm == "SAME") {
- for (size_t i = 0; i < data_dims.size(); ++i) {
+ for (int i = 0; i < data_dims.size(); ++i) {
int out_size = (data_dims[i] + strides[i] - 1) / strides[i];
int pad_sum =
std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
......
@@ -192,8 +192,8 @@ void AsyncCommunicator::SendThread() {
auto send_task = [this, &var_name, &var_queue] {
VLOG(3) << var_name << " merge and send";
std::vector<std::shared_ptr<Variable>> vars;
- size_t merged_var_num = 0;
- size_t wait_times = 0;
+ int merged_var_num = 0;
+ int wait_times = 0;
while (merged_var_num < FLAGS_communicator_max_merge_var_num) {
if (var_queue->Size() == 0) {
VLOG(3) << "wait_times -> " << wait_times;
@@ -254,7 +254,7 @@ void AsyncCommunicator::SendThread() {
void AsyncCommunicator::RecvThread() {
VLOG(3) << "RecvThread start!";
while (running_) {
- auto grad_num = grad_num_.load();
+ int grad_num = grad_num_.load();
if (grad_num > FLAGS_communicator_min_send_grad_num_before_recv) {
VLOG(1) << "current grad num " << grad_num;
RecvAll();
@@ -538,7 +538,7 @@ void GeoSgdCommunicator::Send(const std::vector<std::string> &sparse_var_names,
int element_number = var_tensor.numel();
int *var_mutable_data = var_tensor.mutable_data<int>(var_tensor.place());
// insert ids which has not been record
- for (size_t j = 0; j < element_number; j++) {
+ for (int j = 0; j < element_number; j++) {
auto ep_idx = GetSectionIndex(var_mutable_data[j],
absolute_section_[sparse_var_tables[i]]);
ids_table->at(sparse_var_tables[i])[ep_idx].insert(var_mutable_data[j]);
@@ -559,7 +559,7 @@ void GeoSgdCommunicator::SendThread() {
std::vector<std::future<void>> task_futures;
task_futures.reserve(send_varname_to_ctx_.size());
- size_t wait_times = 0;
+ int wait_times = 0;
while (ids_send_vec_.size() < geo_need_push_nums_) {
VLOG(4) << "ids_send_vec_ Size: " << ids_send_vec_.size();
if (need_push_queue_->Size() > 0) {
@@ -747,7 +747,7 @@ void GeoSgdCommunicator::SendUpdateSparseVars(
auto cpu_ctx = paddle::platform::CPUDeviceContext();
auto blas = math::GetBlas<paddle::platform::CPUDeviceContext, float>(cpu_ctx);
float avg = 1 / static_cast<float>(trainer_nums_);
- for (int y = 0; y < new_rows.size(); y++) {
+ for (size_t y = 0; y < new_rows.size(); y++) {
auto ids = new_rows[y];
float *x_val = x_value + ids * row_numel;
@@ -876,7 +876,7 @@ void GeoSgdCommunicator::RecvUpdateSparseVars(
auto cpu_ctx = paddle::platform::CPUDeviceContext();
auto blas = math::GetBlas<paddle::platform::CPUDeviceContext, float>(cpu_ctx);
- for (int y = 0; y < new_rows.size(); y++) {
+ for (size_t y = 0; y < new_rows.size(); y++) {
std::vector<float> row_delta(row_numel, 0);
auto ids = new_rows[y];
......
@@ -418,7 +418,7 @@ class GeoSgdCommunicator : public Communicator {
private:
int trainer_nums_ = 1;
- int geo_need_push_nums_ = 100;
+ size_t geo_need_push_nums_ = 100;
bool is_geo_sgd_ = false;
Scope* training_scope_;
std::shared_ptr<Scope> delta_scope_; // parameter local delta: recv - old
......
@@ -75,7 +75,7 @@ TEST(communicator, merge_selected_rows) {
auto dims =
framework::make_ddim({static_cast<int64_t>(rows.size()), width});
auto *data = slr->mutable_value()->mutable_data<float>(dims, cpu_place);
- for (auto i = 0; i < rows.size(); ++i) {
+ for (size_t i = 0; i < rows.size(); ++i) {
for (auto j = 0; j < width; ++j) {
data[i * width + j] = static_cast<float>(rows[i]);
}
@@ -97,7 +97,7 @@ TEST(communicator, merge_selected_rows) {
for (auto i = 0; i < 10; ++i) {
out_values.push_back(static_cast<float>(i * (10 - i)));
}
- for (auto i = 0; i < out_slr.rows().size(); ++i) {
+ for (size_t i = 0; i < out_slr.rows().size(); ++i) {
ASSERT_EQ(out_slr.rows()[i], i);
for (auto j = 0; j < width; ++j) {
ASSERT_EQ(out_data[i * width + j], out_values[i]);
......
@@ -215,7 +215,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
std::unordered_set<int64_t> s(ids_union.begin(), ids_union.end());
ids_union.assign(s.begin(), s.end());
- for (int i = 0; i < table_names.size(); i++) {
+ for (size_t i = 0; i < table_names.size(); i++) {
tables.push_back(std::make_pair(table_names[i], endpoints[i]));
}
@@ -230,7 +230,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
}
// copy vectors to out vars
- for (int i = 0; i < out_var_names.size(); i++) {
+ for (size_t i = 0; i < out_var_names.size(); i++) {
auto& ids = ids_group[i];
auto* out_t =
scope.FindVar(out_var_names[i])->GetMutable<framework::LoDTensor>();
@@ -240,7 +240,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
auto* out_d = out_t->mutable_data<float>(place);
- for (int idx = 0; idx < ids.size(); idx++) {
+ for (size_t idx = 0; idx < ids.size(); idx++) {
const auto& id = ids[idx];
if (padding_idx != distributed::kNoPadding && id == padding_idx) {
......
@@ -50,12 +50,12 @@ inline EP_SPLIT_TABLE_PAIRS GetMultiFieldRpcContext(
PADDLE_ENFORCE_GT(multi_parts, 0, "multi_parts must >=1");
if (multi_parts == 1) {
- for (int i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
+ for (size_t i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
table_pairs.push_back(
std::make_pair(rpc_ctx.epmap[i], rpc_ctx.splited_var_names[i]));
}
} else {
- for (int i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
+ for (size_t i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
for (int x = 0; x < multi_parts; x++) {
auto table =
string::Sprintf("%s@%d@PIECE", rpc_ctx.splited_var_names[i], x);
......
@@ -171,7 +171,7 @@ bool RequestGetHandler::Handle(const std::string& varname,
auto* data = out_slr->mutable_value()->mutable_data<float>(
out_dims, origin_tensor.place());
auto width = dims[1];
- for (auto i = 0; i < updated_rows.size(); ++i) {
+ for (size_t i = 0; i < updated_rows.size(); ++i) {
PADDLE_ENFORCE_LT(updated_rows[i], dims[0]);
memcpy(data + i * width, origin_tensor_data + updated_rows[i] * width,
sizeof(float) * width);
......
@@ -137,7 +137,7 @@ class Conv2DFusionOpInferShape : public framework::InferShapeBase {
std::vector<int64_t> output_shape({in_dims[0]});
output_shape.push_back(filter_dims[0]);
- for (size_t i = 0; i < in_data_dims.size(); ++i) {
+ for (int i = 0; i < in_data_dims.size(); ++i) {
if ((!ctx->IsRuntime()) &&
(in_data_dims[i] <= 0 || filter_dims[i + 2] <= 0)) {
output_shape.push_back(-1);
......
@@ -41,7 +41,7 @@ void MatMulJitCode::genCode() {
for (size_t g = 0; g < groups.size(); ++g) {
size_t x_offset = 0;
size_t wgt_offset_tmp = 0;
- for (int i = 0; i < g; ++i) {
+ for (size_t i = 0; i < g; ++i) {
wgt_offset_tmp += groups[i] * block_len;
}
for (int k = 0; k < k_; ++k) {
......
@@ -122,8 +122,8 @@ void PaddingGradFunctor(int rank, const framework::ExecutionContext& context,
inline bool IsSymmetricPadding(const std::vector<int>& pads,
const int data_dim) {
bool is_sys_pad = true;
- if (pads.size() == data_dim * 2) {
-   for (size_t i = 0; i < data_dim; ++i) {
+ if (static_cast<int>(pads.size()) == data_dim * 2) {
+   for (int i = 0; i < data_dim; ++i) {
if (pads[2 * i] != pads[2 * i + 1]) {
is_sys_pad = false;
return is_sys_pad;
......
@@ -65,8 +65,8 @@ inline void UpdatePadding(std::vector<int>* paddings, const bool global_pooling,
const std::vector<int>& ksize) {
// set padding size == data_dims.size() * 2
auto data_shape = framework::vectorize<int>(data_dims);
- if (paddings->size() == data_dims.size()) {
-   for (size_t i = 0; i < data_dims.size(); ++i) {
+ if (static_cast<int>(paddings->size()) == data_dims.size()) {
+   for (int i = 0; i < data_dims.size(); ++i) {
int copy_pad = *(paddings->begin() + 2 * i);
paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
}
......
@@ -94,7 +94,7 @@ static inline std::vector<framework::DDim> UpdateOutsDims(
framework::make_ddim(sections), in_dims, axis);
}
}
- for (size_t i = 0; i < outs_number; ++i) {
+ for (int i = 0; i < outs_number; ++i) {
outs_dims[i][axis] = sections[i];
}
}
......
@@ -44,7 +44,7 @@ inline std::vector<int64_t> GetNewDataFromShapeTensor(
&cpu_starts_tensor);
new_data = cpu_starts_tensor.data<int32_t>();
}
- for (size_t i = 0; i < new_data_tensor->numel(); ++i) {
+ for (int i = 0; i < new_data_tensor->numel(); ++i) {
vec_new_data.push_back(static_cast<int64_t>(*(new_data + i)));
}
return vec_new_data;
......