Unverified · Commit ce704ee9 · authored by Z zhangchunle · committed by GitHub

fix sign-compare warning (#43401)

Parent 31b73346
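For context: `-Wsign-compare` fires when a signed and an unsigned integer meet in a comparison, because the signed operand is implicitly converted to the unsigned type and a negative value silently becomes a very large one. The hunks below apply two recurring fixes: change the loop index to match the type of its bound (`size_t` for STL container sizes, `int` for protobuf `*_size()` accessors), or keep the signed variable, test its sign explicitly, and add a `static_cast` before the unsigned comparison. The sketch below is illustrative only and is not taken from this patch:

```cpp
// Illustrative only -- not part of this patch. A minimal reproduction of
// -Wsign-compare and the two fix patterns used throughout the commit.
#include <cstddef>
#include <vector>

// Fix pattern 1: give the loop index the same type as the bound, so the
// comparison is homogeneous (size_t vs. size_t here).
int CountMatches(const std::vector<int>& values, int target) {
  int matches = 0;
  // `for (int i = 0; i < values.size(); ++i)` would warn under -Wall/-Wextra
  // with GCC/Clang, because `i` is converted to an unsigned type.
  for (size_t i = 0; i < values.size(); ++i) {
    if (values[i] == target) ++matches;
  }
  return matches;
}

// Fix pattern 2: keep the signed variable (it can legitimately be negative),
// check its sign first, then cast explicitly for the unsigned comparison.
bool IsValidIndex(int id, const std::vector<int>& channels) {
  return id >= 0 && static_cast<size_t>(id) < channels.size();
}

int main() {
  std::vector<int> v{1, 2, 2, 3};
  return (CountMatches(v, 2) == 2 && IsValidIndex(1, v)) ? 0 : 1;
}
```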
......@@ -136,7 +136,7 @@ int32_t BrpcPsClient::CreateClient2ClientConnection(
server_ip_port.append(":");
server_ip_port.append(std::to_string(client_list[i].port));
_client_channels[i].reset(new brpc::Channel());
if (_client_channels[i]->Init(server_ip_port.c_str(), "", &options) != 0) {
if (_client_channels[i]->Init(server_ip_port.c_str(), "", &options)) {
VLOG(0) << "BrpcPSClient connect to Client:" << server_ip_port
<< " Failed! Try again.";
std::string int_ip_port =
......@@ -1195,7 +1195,8 @@ std::future<int32_t> BrpcPsClient::SendClient2ClientMsg(
int msg_type, int to_client_id, const std::string &msg) {
auto promise = std::make_shared<std::promise<int32_t>>();
std::future<int> fut = promise->get_future();
if (to_client_id >= _client_channels.size()) {
if (to_client_id >= 0 &&
static_cast<size_t>(to_client_id) >= _client_channels.size()) {
VLOG(0) << "to_client_id is out of range clients, which size is "
<< _client_channels.size();
promise->set_value(-1);
......@@ -1778,7 +1779,7 @@ void BrpcPsClient::PushDenseTaskConsume() {
});
++merge_count;
}
for (uint32_t i = 0; i < merge_count; ++i) {
for (size_t i = 0; i < merge_count; ++i) {
merge_status[i].wait();
}
......
......@@ -713,7 +713,7 @@ int32_t BrpcPsService::CacheShuffle(Table *table,
};
std::vector<Table *> table_ptrs;
for (size_t i = 3; i < request.params_size(); ++i) {
for (int i = 3; i < request.params_size(); ++i) {
int table_id = std::stoi(request.params(i));
Table *table_ptr = _server->GetTable(table_id);
table_ptrs.push_back(table_ptr);
......
......@@ -681,7 +681,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
if (tensor->lod().size() > 0) {
for (size_t i = 0; i < tensor->lod()[0].size() - 1; ++i) {
for (int j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
for (size_t j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
++j, output_len += fea_dim) {
uint64_t real_id = static_cast<uint64_t>(ids[j]);
if (real_id == padding_id) {
......@@ -727,7 +727,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
++input_idx;
}
}
CHECK(output_len == g_tensor->numel());
CHECK(static_cast<size_t>(output_len) == g_tensor->numel());
}
std::vector<float *> push_g_vec(input_idx, nullptr);
......
......@@ -547,7 +547,8 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
seq.push_back(request_idx);
}
size_t remote_call_num = request_call_num;
if (request2server.size() != 0 && request2server.back() == rank) {
if (request2server.size() != 0 &&
static_cast<size_t>(request2server.back()) == rank) {
remote_call_num--;
local_buffers.resize(node_id_buckets.back().size());
local_actual_sizes.resize(node_id_buckets.back().size());
......@@ -582,7 +583,7 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
for (size_t i = 0; i < node_num; i++) {
if (fail_num > 0 && failed[seq[i]]) {
size = 0;
} else if (request2server[seq[i]] != rank) {
} else if (static_cast<size_t>(request2server[seq[i]]) != rank) {
res[seq[i]]->copy_and_forward(&size, sizeof(int));
} else {
size = local_actual_sizes[local_index++];
......@@ -596,7 +597,7 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
for (size_t i = 0; i < node_num; i++) {
if (fail_num > 0 && failed[seq[i]]) {
continue;
} else if (request2server[seq[i]] != rank) {
} else if (static_cast<size_t>(request2server[seq[i]]) != rank) {
char temp[actual_size[i] + 1];
res[seq[i]]->copy_and_forward(temp, actual_size[i]);
cntl->response_attachment().append(temp, actual_size[i]);
......
......@@ -43,7 +43,7 @@ int32_t PSClient::Configure(
const auto &work_param = _config.worker_param().downpour_worker_param();
for (size_t i = 0; i < work_param.downpour_table_param_size(); ++i) {
for (int i = 0; i < work_param.downpour_table_param_size(); ++i) {
auto *accessor = CREATE_PSCORE_CLASS(
ValueAccessor,
work_param.downpour_table_param(i).accessor().accessor_class());
......
......@@ -23,7 +23,7 @@ namespace distributed {
int32_t PsLocalClient::Initialize() {
const auto& downpour_param = _config.server_param().downpour_server_param();
TableManager::Instance().Initialize();
for (size_t i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
for (int i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
auto* table = CREATE_PSCORE_CLASS(
Table, downpour_param.downpour_table_param(i).table_class());
table->SetShard(0, 1);
......
......@@ -51,7 +51,7 @@ void GraphPyService::add_table_feat_conf(std::string table_name,
int feat_idx = table_feat_mapping[idx][feat_name];
VLOG(0) << "table_name " << table_name << " mapping id " << idx;
VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx;
if (feat_idx < table_feat_conf_feat_name[idx].size()) {
if (static_cast<size_t>(feat_idx) < table_feat_conf_feat_name[idx].size()) {
// override
table_feat_conf_feat_name[idx][feat_idx] = feat_name;
table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype;
......
......@@ -81,14 +81,14 @@ class GraphPyService {
graph_proto->set_table_name("cpu_graph_table");
graph_proto->set_use_cache(false);
for (int i = 0; i < id_to_edge.size(); i++)
for (size_t i = 0; i < id_to_edge.size(); i++)
graph_proto->add_edge_types(id_to_edge[i]);
for (int i = 0; i < id_to_feature.size(); i++) {
for (size_t i = 0; i < id_to_feature.size(); i++) {
graph_proto->add_node_types(id_to_feature[i]);
auto feat_node = id_to_feature[i];
::paddle::distributed::GraphFeature* g_f =
graph_proto->add_graph_feature();
for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) {
for (size_t x = 0; x < table_feat_conf_feat_name[i].size(); x++) {
g_f->add_name(table_feat_conf_feat_name[i][x]);
g_f->add_dtype(table_feat_conf_feat_dtype[i][x]);
g_f->add_shape(table_feat_conf_feat_shape[i][x]);
......
......@@ -76,7 +76,7 @@ int32_t PSServer::Configure(
uint32_t barrier_table = UINT32_MAX;
uint32_t global_step_table = UINT32_MAX;
for (size_t i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
for (int i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
auto *table = CREATE_PSCORE_CLASS(
Table, downpour_param.downpour_table_param(i).table_class());
......
......@@ -1205,7 +1205,7 @@ uint32_t GraphTable::get_thread_pool_index_by_shard_index(int64_t shard_index) {
int32_t GraphTable::clear_nodes(int type_id, int idx) {
auto &search_shards = type_id == 0 ? edge_shards[idx] : feature_shards[idx];
for (int i = 0; i < search_shards.size(); i++) {
for (size_t i = 0; i < search_shards.size(); i++) {
search_shards[i]->clear();
}
return 0;
......@@ -1478,7 +1478,7 @@ std::vector<std::vector<int64_t>> GraphTable::get_all_id(int type_id, int idx,
std::vector<std::vector<int64_t>> res(slice_num);
auto &search_shards = type_id == 0 ? edge_shards[idx] : feature_shards[idx];
std::vector<std::future<std::vector<int64_t>>> tasks;
for (int i = 0; i < search_shards.size(); i++) {
for (size_t i = 0; i < search_shards.size(); i++) {
tasks.push_back(_shards_task_pool[i % task_pool_size_]->enqueue(
[&search_shards, i]() -> std::vector<int64_t> {
return search_shards[i]->get_all_id();
......
......@@ -81,8 +81,8 @@ int32_t MemoryDenseTable::InitializeValue() {
fixed_len_params_dim_ = 0;
for (int x = 0; x < size; ++x) {
int dim = common.dims()[x];
if (dim != param_dim_) {
auto& dim = common.dims()[x];
if (static_cast<int>(dim) != param_dim_) {
fixed_len_params_dim_ += dim;
} else {
param_col_ids_.push_back(x);
......
......@@ -625,7 +625,7 @@ int32_t SSDSparseTable::Load(const std::string& path,
}
// Load the data under the `path` directory in the range [start_idx, end_idx)
int32_t SSDSparseTable::Load(size_t start_idx, int end_idx,
int32_t SSDSparseTable::Load(size_t start_idx, size_t end_idx,
const std::vector<std::string>& file_list,
const std::string& param) {
if (start_idx >= file_list.size()) {
......@@ -699,7 +699,8 @@ int32_t SSDSparseTable::Load(size_t start_idx, int end_idx,
ssd_values.emplace_back(std::make_pair((char*)data_buffer_ptr,
value_size * sizeof(float)));
data_buffer_ptr += feature_value_size;
if (ssd_keys.size() == FLAGS_pserver_load_batch_size) {
if (static_cast<int>(ssd_keys.size()) ==
FLAGS_pserver_load_batch_size) {
_db->put_batch(local_shard_id, ssd_keys, ssd_values,
ssd_keys.size());
ssd_keys.clear();
......
......@@ -79,7 +79,7 @@ class SSDSparseTable : public MemorySparseTable {
virtual int32_t Load(const std::string& path,
const std::string& param) override;
// Load the data under the `path` directory in the range [start_idx, end_idx)
virtual int32_t Load(size_t start_idx, int end_idx,
virtual int32_t Load(size_t start_idx, size_t end_idx,
const std::vector<std::string>& file_list,
const std::string& param);
int64_t LocalSize();
......
......@@ -536,7 +536,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
output_len = 0;
if (tensor->lod().size() > 0) {
for (int i = 0; i < tensor->lod()[0].size() - 1; ++i) {
for (size_t i = 0; i < tensor->lod()[0].size() - 1; ++i) {
for (size_t j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
++j, output_len += fea_dim) {
uint64_t real_id = static_cast<uint64_t>(ids[j]);
......@@ -566,7 +566,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
}
}
} else {
for (int i = 0; i < len; ++i, output_len += fea_dim) {
for (size_t i = 0; i < len; ++i, output_len += fea_dim) {
uint64_t real_id = static_cast<uint64_t>(ids[i]);
if (real_id == padding_id) {
continue;
......@@ -592,7 +592,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
++input_idx;
}
}
CHECK(output_len == g_tensor->numel());
CHECK(static_cast<int64_t>(output_len) == g_tensor->numel());
}
std::vector<float*> push_g_vec(input_idx, nullptr);
......
......@@ -295,7 +295,7 @@ void RunBrpcPushSparse() {
fea_temp_value_ptr.data(), 0, fea_keys.data(), fea_keys.size(), true);
pull_update_status.wait();
for (size_t idx = 0; idx < tensor->numel(); ++idx) {
for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
EXPECT_FLOAT_EQ(fea_temp_values[idx], fea_values[idx] - 1.0);
}
......
......@@ -222,15 +222,15 @@ TEST(downpour_feature_value_accessor_test, test_update) {
v.embed_w = value[i][5];
int idx = 6;
for (auto j = 0u; j < acc->common_feature_value.embed_sgd_dim; ++j) {
for (int j = 0; j < acc->common_feature_value.embed_sgd_dim; ++j) {
v.embed_g2sum.push_back(value[i][idx + j]);
}
idx += acc->common_feature_value.embed_sgd_dim;
for (auto j = 0u; j < acc->common_feature_value.embedx_dim; ++j) {
for (int j = 0; j < acc->common_feature_value.embedx_dim; ++j) {
v.embedx_w.push_back(value[i][idx + j]);
}
idx += acc->common_feature_value.embedx_dim;
for (auto j = 0u; j < acc->common_feature_value.embedx_sgd_dim; ++j) {
for (int j = 0; j < acc->common_feature_value.embedx_sgd_dim; ++j) {
v.embedx_g2sum.push_back(value[i][idx + j]);
}
......@@ -239,7 +239,7 @@ TEST(downpour_feature_value_accessor_test, test_update) {
push_v.show = grad[i][1];
push_v.click = grad[i][2];
push_v.embed_g = grad[i][3];
for (auto j = 0; j < parameter.embedx_dim(); ++j) {
for (int j = 0; j < parameter.embedx_dim(); ++j) {
push_v.embedx_g.push_back(grad[i][4 + j]);
}
......
......@@ -142,7 +142,7 @@ TEST(MemorySparseTable, SGD) {
// table->PullSparse(pull_values.data(), value);
for (size_t i = 0; i < init_keys.size(); ++i) {
for (size_t j = 2; j < emb_dim + 3; ++j) {
for (int j = 2; j < emb_dim + 3; ++j) {
auto update_val = init_values[i * (emb_dim + 1) + j] -
0.1 * total_gradients[3 + i * (emb_dim + 4) + j];
VLOG(3) << total_gradients[i * (emb_dim + 4) + j + 3] << ":"
......
......@@ -221,8 +221,8 @@ void BindGraphPyClient(py::module* m) {
auto feats =
self.get_node_feat(node_type, node_ids, feature_names);
std::vector<std::vector<py::bytes>> bytes_feats(feats.size());
for (int i = 0; i < feats.size(); ++i) {
for (int j = 0; j < feats[i].size(); ++j) {
for (size_t i = 0; i < feats.size(); ++i) {
for (size_t j = 0; j < feats[i].size(); ++j) {
bytes_feats[i].push_back(py::bytes(feats[i][j]));
}
}
......@@ -234,8 +234,8 @@ void BindGraphPyClient(py::module* m) {
std::vector<std::string> feature_names,
std::vector<std::vector<py::bytes>> bytes_feats) {
std::vector<std::vector<std::string>> feats(bytes_feats.size());
for (int i = 0; i < bytes_feats.size(); ++i) {
for (int j = 0; j < bytes_feats[i].size(); ++j) {
for (size_t i = 0; i < bytes_feats.size(); ++i) {
for (size_t j = 0; j < bytes_feats[i].size(); ++j) {
feats[i].push_back(std::string(bytes_feats[i][j]));
}
}
......
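A closing note on why the explicit `>= 0` checks and casts above matter (illustrative sketch, not code from the patch): in a mixed signed/unsigned comparison the signed operand is converted first, so a negative value wraps around to a huge unsigned one and the comparison no longer reads the way it was written.

```cpp
// Illustrative only. Demonstrates the implicit conversion that
// -Wsign-compare guards against.
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> channels(4);
  int id = -1;

  // Intended, fully signed comparison: -1 >= 4 is false.
  std::cout << (id >= static_cast<int>(channels.size())) << "\n";  // prints 0

  // What happens in a mixed comparison: -1 is converted to size_t
  // (SIZE_MAX), and SIZE_MAX >= 4 is true.
  std::cout << (static_cast<size_t>(id) >= channels.size()) << "\n";  // prints 1
  return 0;
}
```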