Unverified · Commit 2a795dfa · Authored by: zhangchunle · Committed by: GitHub

fix sign-compare warning4 (#43625)

Parent: 007f3614
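The warning addressed here, -Wsign-compare, fires whenever a signed and an unsigned integer are compared directly, as in the `if (to_pserver_id >= _pserver_channels.size())` check below. A minimal sketch of the pattern and of the cast-based fix used throughout the diff; the function and variable names are illustrative only, not taken from the Paddle sources:

#include <cstddef>
#include <vector>

// Comparing a signed int against an unsigned size_t triggers -Wsign-compare,
// because the int is implicitly converted to the unsigned type.
bool InRange(int idx, const std::vector<float>& values) {
  // return idx < values.size();                         // warns under -Wsign-compare
  return static_cast<std::size_t>(idx) < values.size();  // same check, explicit cast
  // (a negative idx wraps to a large unsigned value and is treated as out of range)
}

Each hunk below applies the same idea case by case: one operand is cast so that both sides of the comparison share a type (size_t, int, int64_t, or unsigned int, depending on what the other side already is).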
@@ -136,7 +136,7 @@ std::future<int32_t> BrpcPsServer::SendPServer2PServerMsg(
     int msg_type, int to_pserver_id, const std::string &msg) {
   auto promise = std::make_shared<std::promise<int32_t>>();
   std::future<int> fut = promise->get_future();
-  if (to_pserver_id >= _pserver_channels.size()) {
+  if (static_cast<size_t>(to_pserver_id) >= _pserver_channels.size()) {
     LOG(FATAL) << "to_pserver_id is out of range pservers, which size is "
                << _pserver_channels.size();
     promise->set_value(-1);
......
@@ -727,7 +727,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
         ++input_idx;
       }
     }
-    CHECK(static_cast<size_t>(output_len) == g_tensor->numel());
+    CHECK(static_cast<int64_t>(output_len) == g_tensor->numel());
   }
   std::vector<float *> push_g_vec(input_idx, nullptr);
......
@@ -42,7 +42,7 @@ int32_t BarrierTable::Barrier(const uint32_t trainer_id,
             << " add trainer id: " << trainer_id;
   }
-  if (trainer_ids_.size() < trigger_.load()) {
+  if (static_cast<int>(trainer_ids_.size()) < trigger_.load()) {
     std::vector<uint32_t> diffs(trainer_all_.size());
     auto iter = std::set_difference(trainer_all_.begin(), trainer_all_.end(),
                                     trainer_ids_.begin(), trainer_ids_.end(),
......
@@ -234,7 +234,7 @@ int32_t CtrDoubleAccessor::Merge(float** update_values,
       update_value[i] += other_update_value[i];
     }*/
     for (size_t i = 0; i < total_dim; ++i) {
-      if (i != CtrDoublePushValue::SlotIndex()) {
+      if (static_cast<int>(i) != CtrDoublePushValue::SlotIndex()) {
         update_value[i] += other_update_value[i];
       }
     }
......
@@ -276,7 +276,7 @@ int32_t MemoryDenseTable::Load(const std::string& path,
       CHECK(str_len == param_col_ids_.size())
           << "expect " << param_col_ids_.size() << " float, but got "
           << str_len;
-      for (int col_idx = 0; col_idx < str_len; ++col_idx) {
+      for (size_t col_idx = 0; col_idx < str_len; ++col_idx) {
         if (param_col_ids_[col_idx] < 0) {
           continue;
         }
......
@@ -53,7 +53,8 @@ int32_t MemorySparseTable::InitializeValue() {
   _avg_local_shard_num =
       sparse_local_shard_num(_sparse_table_shard_num, _shard_num);
   _real_local_shard_num = _avg_local_shard_num;
-  if (_real_local_shard_num * (_shard_idx + 1) > _sparse_table_shard_num) {
+  if (static_cast<int>(_real_local_shard_num * (_shard_idx + 1)) >
+      _sparse_table_shard_num) {
     _real_local_shard_num =
         _sparse_table_shard_num - _real_local_shard_num * _shard_idx;
     _real_local_shard_num =
......
@@ -202,7 +202,7 @@ int32_t SparseAccessor::Merge(float** update_values,
     float* update_value = update_values[value_item];
     const float* other_update_value = other_update_values[value_item];
     for (size_t i = 0; i < total_dim; ++i) {
-      if (i != SparsePushValue::SlotIndex()) {
+      if (static_cast<int>(i) != SparsePushValue::SlotIndex()) {
         update_value[i] += other_update_value[i];
       }
     }
......
@@ -637,8 +637,9 @@ int32_t SSDSparseTable::Load(size_t start_idx, size_t end_idx,
   size_t mf_value_size =
       _value_accesor->GetAccessorInfo().mf_size / sizeof(float);
-  end_idx =
-      end_idx < _sparse_table_shard_num ? end_idx : _sparse_table_shard_num;
+  end_idx = static_cast<int>(end_idx) < _sparse_table_shard_num
+                ? end_idx
+                : _sparse_table_shard_num;
   int thread_num = (end_idx - start_idx) < 20 ? (end_idx - start_idx) : 20;
   omp_set_num_threads(thread_num);
 #pragma omp parallel for schedule(dynamic)
......
@@ -555,10 +555,12 @@ void FleetWrapper::PushSparseFromTensorAsync(
       // in
       // ctr_accessor.h
       push_values.back()[0] = 2;  // TODO(zhaocaibei123): slot
-      push_values.back()[1] =
-          (i >= show_size ? 1 : static_cast<float>(show_tensor[i]));
-      push_values.back()[2] =
-          (i >= clk_size ? 0 : static_cast<float>(clk_tensor[i]));
+      push_values.back()[1] = (static_cast<int>(i) >= show_size
+                                   ? 1
+                                   : static_cast<float>(show_tensor[i]));
+      push_values.back()[2] = (static_cast<int>(i) >= clk_size
+                                   ? 0
+                                   : static_cast<float>(clk_tensor[i]));
       float* data = push_values.back().data() + 3;
       memcpy(data, g + output_len, sizeof(float) * fea_dim);
     }
@@ -582,10 +584,12 @@ void FleetWrapper::PushSparseFromTensorAsync(
       // slot show clk grad... consistent with CtrCommonPushValue defined in
      // ctr_accessor.h
       push_values.back()[0] = 2;  // TODO(zhaocaibei123): slot
-      push_values.back()[1] =
-          (i >= show_size ? 1 : static_cast<float>(show_tensor[i]));
-      push_values.back()[2] =
-          (i >= clk_size ? 0 : static_cast<float>(clk_tensor[i]));
+      push_values.back()[1] = (static_cast<int>(i) >= show_size
+                                   ? 1
+                                   : static_cast<float>(show_tensor[i]));
+      push_values.back()[2] = (static_cast<int>(i) >= clk_size
+                                   ? 0
+                                   : static_cast<float>(clk_tensor[i]));
       float* data = push_values.back().data() + 3;
       memcpy(data, g + output_len, sizeof(float) * fea_dim);
     }
......
@@ -239,7 +239,7 @@ TEST(downpour_feature_value_accessor_test, test_update) {
     push_v.show = grad[i][1];
     push_v.click = grad[i][2];
     push_v.embed_g = grad[i][3];
-    for (int j = 0; j < parameter.embedx_dim(); ++j) {
+    for (unsigned int j = 0; j < parameter.embedx_dim(); ++j) {
       push_v.embedx_g.push_back(grad[i][4 + j]);
     }
......