Unverified Commit 01fa4ead authored by Tao Luo, committed by GitHub

fix -Wno-error=sign-compare warning in gcc8 (#21434)

* fix -Wno-error=sign-compare warning in gcc8

test=develop

* fix warning in distributed codes

test=develop
Parent 37f3e56d
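For context, the warning silenced by this patch is GCC's `-Wsign-compare` (promoted to an error by `-Werror`), which fires when a signed and an unsigned integer are compared, e.g. a plain `int` loop index against `std::vector::size()`, which returns `size_t`. The diff below applies the two standard remedies: change the index or literal type to match the other side (`int` vs. `size_t`, `0UL` literals in gtest assertions), or `static_cast` one side when it is known to be non-negative. A minimal stand-alone sketch of both patterns, not taken from the patch itself:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 3};
  int expected = 3;  // hypothetical count coming from a signed API

  // Warns under -Wsign-compare: `i` is signed, v.size() is unsigned.
  // for (int i = 0; i < v.size(); ++i) { ... }

  // Fix 1: use an index type that matches the container's size type.
  for (size_t i = 0; i < v.size(); ++i) {
    (void)v[i];
  }

  // Fix 2: cast the signed side when the value cannot be negative.
  assert(static_cast<size_t>(expected) == v.size());
  return 0;
}
```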
......@@ -1072,13 +1072,13 @@ void PrivateInstantDataFeed<T>::Init(const DataFeedDesc& data_feed_desc) {
use_slots_is_dense_.push_back(slot.is_dense());
std::vector<int> local_shape;
if (slot.is_dense()) {
for (size_t j = 0; j < slot.shape_size(); ++j) {
for (int j = 0; j < slot.shape_size(); ++j) {
if (slot.shape(j) == -1) {
multi_inductive_shape_index_[i].push_back(j);
}
}
}
for (size_t j = 0; j < slot.shape_size(); ++j) {
for (int j = 0; j < slot.shape_size(); ++j) {
local_shape.push_back(slot.shape(j));
}
use_slots_shape_.push_back(local_shape);
......
......@@ -200,7 +200,7 @@ template <typename T>
void DatasetImpl<T>::PreLoadIntoMemory() {
VLOG(3) << "DatasetImpl<T>::PreLoadIntoMemory() begin";
if (preload_thread_num_ != 0) {
CHECK(preload_thread_num_ == preload_readers_.size());
CHECK(static_cast<size_t>(preload_thread_num_) == preload_readers_.size());
preload_threads_.clear();
for (int64_t i = 0; i < preload_thread_num_; ++i) {
preload_threads_.push_back(
......@@ -208,7 +208,7 @@ void DatasetImpl<T>::PreLoadIntoMemory() {
preload_readers_[i].get()));
}
} else {
CHECK(thread_num_ == readers_.size());
CHECK(static_cast<size_t>(thread_num_) == readers_.size());
preload_threads_.clear();
for (int64_t i = 0; i < thread_num_; ++i) {
preload_threads_.push_back(std::thread(
......@@ -337,7 +337,7 @@ void DatasetImpl<T>::GlobalShuffle(int thread_num) {
}
std::shuffle(send_index.begin(), send_index.end(),
fleet_ptr->LocalRandomEngine());
for (auto index = 0u; index < this->trainer_num_; ++index) {
for (int index = 0; index < this->trainer_num_; ++index) {
int i = send_index[index];
if (ars[i].Length() == 0) {
continue;
......@@ -398,7 +398,7 @@ void DatasetImpl<T>::DynamicAdjustChannelNum(int channel_num) {
uint64_t output_channels_data_size = 0;
uint64_t consume_channels_data_size = 0;
CHECK(multi_output_channel_.size() == multi_consume_channel_.size());
for (int i = 0; i < multi_output_channel_.size(); ++i) {
for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
output_channels_data_size += multi_output_channel_[i]->Size();
consume_channels_data_size += multi_consume_channel_[i]->Size();
}
......@@ -424,7 +424,7 @@ void DatasetImpl<T>::DynamicAdjustChannelNum(int channel_num) {
std::vector<paddle::framework::Channel<T>> new_channels;
std::vector<paddle::framework::Channel<T>> new_other_channels;
std::vector<T> local_vec;
for (int i = 0; i < origin_channels->size(); ++i) {
for (size_t i = 0; i < origin_channels->size(); ++i) {
local_vec.clear();
(*origin_channels)[i]->Close();
(*origin_channels)[i]->ReadAll(local_vec);
......@@ -506,10 +506,12 @@ void DatasetImpl<T>::CreateReaders() {
if (input_channel_ != nullptr) {
readers_[i]->SetInputChannel(input_channel_.get());
}
if (cur_channel_ == 0 && channel_idx < multi_output_channel_.size()) {
if (cur_channel_ == 0 &&
static_cast<size_t>(channel_idx) < multi_output_channel_.size()) {
readers_[i]->SetOutputChannel(multi_output_channel_[channel_idx].get());
readers_[i]->SetConsumeChannel(multi_consume_channel_[channel_idx].get());
} else if (channel_idx < multi_output_channel_.size()) {
} else if (static_cast<size_t>(channel_idx) <
multi_output_channel_.size()) {
readers_[i]->SetOutputChannel(multi_consume_channel_[channel_idx].get());
readers_[i]->SetConsumeChannel(multi_output_channel_[channel_idx].get());
}
......
......@@ -154,13 +154,13 @@ TEST(GraphTest, WriteAfterRead) {
ASSERT_EQ(n->outputs[0]->Name(), "b");
ASSERT_TRUE(ir::IsControlDepVar(*n->outputs[1]));
control_dep1 = n->outputs[1];
ASSERT_EQ(n->outputs.size(), 2);
ASSERT_EQ(n->outputs.size(), 2UL);
}
if (n->Name() == "dummy") {
ASSERT_EQ(n->inputs[0]->Name(), "c");
ASSERT_TRUE(ir::IsControlDepVar(*n->inputs[1]));
control_dep2 = n->inputs[1];
ASSERT_EQ(n->inputs.size(), 2);
ASSERT_EQ(n->inputs.size(), 2UL);
}
}
ASSERT_EQ(control_dep1, control_dep2);
......@@ -192,14 +192,14 @@ TEST(GraphTest, WriteAfterWrite) {
if (n->Name() == "sum") {
ASSERT_EQ(n->outputs[0]->Name(), "b");
ASSERT_TRUE(ir::IsControlDepVar(*n->outputs[1]));
ASSERT_EQ(n->outputs.size(), 2);
ASSERT_EQ(n->outputs.size(), 2UL);
control_dep1 = n->outputs[1];
}
if (n->Name() == "dummy") {
ASSERT_EQ(n->inputs[0]->Name(), "c");
ASSERT_TRUE(ir::IsControlDepVar(*n->inputs[1]));
control_dep2 = n->inputs[1];
ASSERT_EQ(n->inputs.size(), 2);
ASSERT_EQ(n->inputs.size(), 2UL);
}
}
ASSERT_NE(control_dep1, nullptr);
......
......@@ -140,15 +140,15 @@ TEST(test_layer, test_clear_backward_info) {
op->InsertGradPendingOps(preceding_op.get());
*(op->GetMutableInsMap()) = ins;
*(op->GetMutableOutsMap()) = outs;
ASSERT_GT(op->GetInsMap().size(), 0);
ASSERT_GT(op->GetOutsMap().size(), 0);
ASSERT_GT(op->GradPendingOps().size(), 0);
ASSERT_GT(op->GetInsMap().size(), 0UL);
ASSERT_GT(op->GetOutsMap().size(), 0UL);
ASSERT_GT(op->GradPendingOps().size(), 0UL);
op->ClearBackwardTrace();
ASSERT_EQ(op->GetInsMap().size(), 0);
ASSERT_EQ(op->GetOutsMap().size(), 0);
ASSERT_EQ(op->GradPendingOps().size(), 0);
ASSERT_EQ(op->GetInsMap().size(), 0UL);
ASSERT_EQ(op->GetOutsMap().size(), 0UL);
ASSERT_EQ(op->GradPendingOps().size(), 0UL);
}
TEST(test_layer, test_varbase_basic) {
......
......@@ -72,7 +72,7 @@ struct DataRecord {
"size of each slot should be equal");
}
size_t num_batches = num_samples / bs;
EXPECT_GT(num_batches, 0);
EXPECT_GT(num_batches, 0UL);
batched_data.resize(num_batches);
for (auto &one_batch : batched_data) {
one_batch.resize(datasets.size());
......
......@@ -179,7 +179,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
case PaddleDType::FLOAT32: {
float *pdata = static_cast<float *>(out.data.data());
float *pdata_ref = ref_out.data<float>(&place, &ref_size);
EXPECT_EQ(size, ref_size);
EXPECT_EQ(size, static_cast<size_t>(ref_size));
for (size_t j = 0; j < size; ++j) {
CheckError(pdata_ref[j], pdata[j]);
}
......@@ -188,7 +188,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
case PaddleDType::INT32: {
int32_t *pdata = static_cast<int32_t *>(out.data.data());
int32_t *pdata_ref = ref_out.data<int32_t>(&place, &ref_size);
EXPECT_EQ(size, ref_size);
EXPECT_EQ(size, static_cast<size_t>(ref_size));
for (size_t j = 0; j < size; ++j) {
EXPECT_EQ(pdata_ref[j], pdata[j]);
}
......@@ -197,7 +197,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
case PaddleDType::UINT8: {
uint8_t *pdata = static_cast<uint8_t *>(out.data.data());
uint8_t *pdata_ref = ref_out.data<uint8_t>(&place, &ref_size);
EXPECT_EQ(size, ref_size);
EXPECT_EQ(size, static_cast<size_t>(ref_size));
for (size_t j = 0; j < size; ++j) {
EXPECT_EQ(pdata_ref[j], pdata[j]);
}
......
......@@ -45,8 +45,8 @@ TEST(BestFitAllocator, test_allocation) {
dynamic_cast<BestFitAllocation*>(allocation.get());
ASSERT_NE(best_fit_allocation, nullptr);
ASSERT_FALSE(best_fit_allocation->ChunkIterator()->is_free);
ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 0);
ASSERT_EQ(allocation->size(), 80);
ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 0UL);
ASSERT_EQ(allocation->size(), 80UL);
ASSERT_EQ(allocation->ptr(), nullptr);
}
......@@ -58,7 +58,7 @@ TEST(BestFitAllocator, test_allocation) {
{
auto best_fit_allocation =
dynamic_cast<BestFitAllocation*>(allocation2.get());
ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80);
ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80UL);
}
allocation2.reset();
allocation2 = allocator.Allocate(60);
......@@ -66,7 +66,7 @@ TEST(BestFitAllocator, test_allocation) {
{
auto best_fit_allocation =
dynamic_cast<BestFitAllocation*>(allocation2.get());
ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80);
ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80UL);
}
allocation.reset();
......@@ -76,7 +76,7 @@ TEST(BestFitAllocator, test_allocation) {
{
auto best_fit_allocation =
dynamic_cast<BestFitAllocation*>(allocation.get());
ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 0);
ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 0UL);
}
allocation.reset();
......
......@@ -20,9 +20,9 @@ namespace memory {
namespace allocation {
TEST(aligned, aligned_size) {
ASSERT_EQ(AlignedSize(1024, 1024), 1024);
ASSERT_EQ(AlignedSize(1023, 1024), 1024);
ASSERT_EQ(AlignedSize(1025, 1024), 2048);
ASSERT_EQ(AlignedSize(1024, 1024), 1024UL);
ASSERT_EQ(AlignedSize(1023, 1024), 1024UL);
ASSERT_EQ(AlignedSize(1025, 1024), 2048UL);
}
struct StubAllocator : public Allocator {
......@@ -58,22 +58,22 @@ TEST(aligned_allocator, aligned_allocator) {
std::make_shared<AlignedAllocator>(allocator, alignment);
auto alloc1 = aligned_allocator->Allocate(1345);
ASSERT_EQ(allocator->AllocNum(), 1);
ASSERT_EQ(allocator->AllocNum(), 1UL);
ASSERT_TRUE(IsAligned(alloc1, alignment));
alloc1.reset();
ASSERT_EQ(allocator->AllocNum(), 0);
ASSERT_EQ(allocator->AllocNum(), 0UL);
{
auto alloc2 = aligned_allocator->Allocate(200);
ASSERT_TRUE(IsAligned(alloc2, alignment));
ASSERT_EQ(allocator->AllocNum(), 1);
ASSERT_EQ(allocator->AllocNum(), 1UL);
auto alloc3 = aligned_allocator->Allocate(3021);
ASSERT_TRUE(IsAligned(alloc3, alignment));
ASSERT_EQ(allocator->AllocNum(), 2);
ASSERT_EQ(allocator->AllocNum(), 2UL);
}
ASSERT_EQ(allocator->AllocNum(), 0);
ASSERT_EQ(allocator->AllocNum(), 0UL);
}
} // namespace allocation
......
......@@ -104,7 +104,7 @@ class ArgsortKernel : public framework::OpKernel<T> {
}
trans.push_back(axis);
framework::DDim trans_dims(in_dims);
for (int i = 0; i < trans.size(); i++) {
for (size_t i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
}
......
......@@ -289,7 +289,7 @@ void OpTester::SetupTensor(framework::LoDTensor *tensor,
}
} else if (initializer == "file") {
std::ifstream is(filename);
for (size_t i = 0; i < cpu_tensor.numel(); ++i) {
for (int i = 0; i < cpu_tensor.numel(); ++i) {
T value;
is >> value;
cpu_ptr[i] = static_cast<T>(value);
......
......@@ -101,7 +101,7 @@ class BprLossGradientOpKernel : public framework::OpKernel<T> {
}
auto p_index = sample_id * num_classes + label_data[sample_id];
for (size_t ni = 0; ni < num_classes; ni++) {
if (label_data[sample_id] == ni) continue;
if (label_data[sample_id] == static_cast<int>(ni)) continue;
auto n_index = sample_id * num_classes + ni;
auto grad_ = -dy_data[sample_id] /
((num_classes - 1) *
......
......@@ -159,7 +159,7 @@ void CropTensorFunction(const framework::ExecutionContext& context) {
std::vector<int> shape = GetShape(context);
// out_dims setted by arrt(shape)
if (shape.size() == 0) {
for (size_t i = 0; i < out_dims.size(); ++i) {
for (int i = 0; i < out_dims.size(); ++i) {
shape.push_back(out_dims[i]);
}
}
......
......@@ -96,7 +96,7 @@ class CollectFpnProposalsOpKernel : public framework::OpKernel<T> {
auto cur_scores_lod = multi_layer_scores[i]->lod().back();
int cur_batch_id = 0;
for (int j = 0; j < cur_level_num; ++j) {
if (j >= cur_scores_lod[cur_batch_id + 1]) {
if (static_cast<size_t>(j) >= cur_scores_lod[cur_batch_id + 1]) {
cur_batch_id++;
}
int cur_index = j + integral_of_all_rois[i];
......
......@@ -76,7 +76,7 @@ class DistributeFpnProposalsOpKernel : public framework::OpKernel<T> {
// record the number of rois in each level
std::vector<int> num_rois_level(num_level, 0);
std::vector<int> num_rois_level_integral(num_level + 1, 0);
for (int i = 0; i < fpn_rois_lod.size() - 1; ++i) {
for (size_t i = 0; i < fpn_rois_lod.size() - 1; ++i) {
Tensor fpn_rois_slice =
fpn_rois->Slice(fpn_rois_lod[i], fpn_rois_lod[i + 1]);
const T* rois_data = fpn_rois_slice.data<T>();
......@@ -111,7 +111,7 @@ class DistributeFpnProposalsOpKernel : public framework::OpKernel<T> {
int* restore_index_data = restore_index->data<int>();
std::vector<int> restore_index_inter(fpn_rois_num, -1);
// distribute the rois into different fpn level by target level
for (int i = 0; i < fpn_rois_lod.size() - 1; ++i) {
for (size_t i = 0; i < fpn_rois_lod.size() - 1; ++i) {
Tensor fpn_rois_slice =
fpn_rois->Slice(fpn_rois_lod[i], fpn_rois_lod[i + 1]);
const T* rois_data = fpn_rois_slice.data<T>();
......
......@@ -193,7 +193,7 @@ class PriorBoxOpKernel : public framework::OpKernel<T> {
#pragma omp parallel for collapse(2)
#endif
for (int i = 0; i < box_num; ++i) {
for (int j = 0; j < variances.size(); ++j) {
for (size_t j = 0; j < variances.size(); ++j) {
e_vars(i, j) = variances[j];
}
}
......
......@@ -48,7 +48,7 @@ TEST(ConcurrentSet, All) {
EXPECT_EQ(in, out);
concurrent_set.GetAndClear(&ret).wait();
EXPECT_EQ(ret.size(), 0);
EXPECT_EQ(ret.size(), 0UL);
}
TEST(AsyncSparseParamUpdateRecorder, All) {
......@@ -90,7 +90,7 @@ TEST(AsyncSparseParamUpdateRecorder, All) {
EXPECT_EQ(in, out);
recorder.GetAndClear("param1", i, &ret);
EXPECT_EQ(ret.size(), 0);
EXPECT_EQ(ret.size(), 0UL);
}
}
......
......@@ -98,7 +98,7 @@ TEST(communicator, merge_selected_rows) {
out_values.push_back(static_cast<float>(i * (10 - i)));
}
for (size_t i = 0; i < out_slr.rows().size(); ++i) {
ASSERT_EQ(out_slr.rows()[i], i);
ASSERT_EQ(out_slr.rows()[i], static_cast<int>(i));
for (auto j = 0; j < width; ++j) {
ASSERT_EQ(out_data[i * width + j], out_values[i]);
}
......
......@@ -119,7 +119,7 @@ void ParameterRecv<T>::operator()(const RpcContext &rpc_ctx,
<< sstream.str();
}
for (auto i = 0; i < recv_slr.rows().size(); ++i) {
for (size_t i = 0; i < recv_slr.rows().size(); ++i) {
auto row_id = recv_slr.rows()[i] + row_offset;
PADDLE_ENFORCE_LT(row_id, recv_dims[0]);
memcpy(recv_tensor->data<T>() + row_id * width,
......@@ -148,7 +148,7 @@ void ParameterRecv<T>::operator()(const RpcContext &rpc_ctx,
std::vector<int64_t> abs_sections =
ToAbsoluteSection(rpc_ctx.height_sections);
for (int i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
for (size_t i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
auto &recv_var_name = rpc_ctx.splited_var_names[i];
auto *var = local_scope->FindVar(recv_var_name);
auto *var_slr = var->GetMutable<framework::SelectedRows>();
......
......@@ -109,7 +109,7 @@ void ParameterSend<T>::operator()(const RpcContext &rpc_ctx,
// create output var in local scope
size_t row_offset = 0;
for (auto i = 0; i < out_num; ++i) {
for (size_t i = 0; i < out_num; ++i) {
framework::Tensor *out = local_scope->Var(rpc_ctx.splited_var_names[i])
->GetMutable<framework::LoDTensor>();
*out = send_tensor.Slice(row_offset, row_offset + outs_dims[i][0]);
......@@ -196,7 +196,7 @@ void ParameterSend<T>::operator()(const RpcContext &rpc_ctx,
auto place = platform::CPUPlace();
for (int ctx = 0; ctx < rpc_ctx.splited_var_names.size(); ctx++) {
for (size_t ctx = 0; ctx < rpc_ctx.splited_var_names.size(); ctx++) {
for (int part = 0; part < multi_parts; part++) {
auto out_idx = ctx * multi_parts + part;
auto rows_idx = outs_rows_idx[out_idx];
......
......@@ -67,7 +67,7 @@ class FilterByInstagKernel : public framework::OpKernel<T> {
auto x2_lods = x2->lod()[0];
Vector<size_t> x1_lods(1, 0);
if (!is_x1_lod) {
for (size_t i = 0; i < x1->dims()[0]; i++) {
for (int i = 0; i < x1->dims()[0]; i++) {
x1_lods.push_back(i + 1);
}
} else {
......@@ -129,13 +129,13 @@ class FilterByInstagKernel : public framework::OpKernel<T> {
out_lod_info.push_back(out_lods);
out->set_lod(out_lod_info);
memset(out_data, 0, out->numel() * sizeof(T));
for (size_t i = 0; i < loss_weight->numel(); i++) {
for (int i = 0; i < loss_weight->numel(); i++) {
loss_weight_data[i] = 1;
}
for (size_t i = 0; i < out_lods.size() - 1; i++) {
size_t pos = out_lods[i];
for (size_t k = map_data[i * 3 + 1];
for (int k = map_data[i * 3 + 1];
k < map_data[i * 3 + 1] + map_data[i * 3 + 2]; k++) {
memcpy(out_data + pos * x1_embed_size, x1_data + k * x1_embed_size,
x1_embed_size * sizeof(T));
......@@ -184,11 +184,11 @@ class FilterByInstagGradKernel : public framework::OpKernel<T> {
memset(x1_grad_data, 0, x1->dims()[0] * x1->dims()[1] * sizeof(T));
if (loss_weight->numel() != 1 || loss_weight_data[0] != 0) {
auto output_dims = output_grad->dims();
for (size_t i = 0; i < mmap->dims()[0]; i++) {
for (int i = 0; i < mmap->dims()[0]; i++) {
int src_ln = mmap_data[i * 3], dst_ln = mmap_data[i * 3 + 1];
int line_cnt = mmap_data[i * 3 + 2];
for (size_t l = 0; l < line_cnt; l++) {
for (size_t j = 0; j < output_dims[1]; j++) {
for (int l = 0; l < line_cnt; l++) {
for (int j = 0; j < output_dims[1]; j++) {
x1_grad_data[(dst_ln + l) * output_dims[1] + j] =
output_grad_data[(src_ln + l) * output_dims[1] + j];
}
......
......@@ -1141,13 +1141,13 @@ TEST(JITKernel_helper, attr) {
<< jit::to_string(jit::kVScal) << jit::to_string(jit::kSgd)
<< jit::to_string(jit::kVSigmoid) << jit::to_string(jit::kVSquare)
<< jit::to_string(jit::kVSub) << jit::to_string(jit::kVTanh);
EXPECT_EQ(out.str().size(), 234);
EXPECT_EQ(out.str().size(), 234UL);
// SeqPoolTypes
out.str("");
out << jit::to_string(jit::kSum) << jit::to_string(jit::kAvg)
<< jit::to_string(jit::kSqrt);
EXPECT_EQ(out.str().size(), 13);
EXPECT_EQ(out.str().size(), 13UL);
EXPECT_EQ(jit::to_kerneltype("relu"), jit::kVRelu);
EXPECT_EQ(jit::to_kerneltype("Identity"), jit::kVIdentity);
......@@ -1157,27 +1157,27 @@ TEST(JITKernel_helper, attr) {
out.str("");
out << jit::lstm_attr_t(8, jit::kVIdentity, jit::kVSigmoid, jit::kVTanh);
EXPECT_EQ(out.str().size(), 89);
EXPECT_EQ(out.str().size(), 89UL);
out.str("");
out << jit::gru_attr_t(8, jit::kVIdentity, jit::kVSigmoid);
EXPECT_EQ(out.str().size(), 52);
EXPECT_EQ(out.str().size(), 52UL);
out.str("");
out << jit::seq_pool_attr_t(8, jit::SeqPoolType::kSum);
EXPECT_EQ(out.str().size(), 44);
EXPECT_EQ(out.str().size(), 44UL);
out.str("");
out << jit::emb_seq_pool_attr_t(1, 2, 3, 4, 5, jit::SeqPoolType::kAvg);
EXPECT_EQ(out.str().size(), 93);
EXPECT_EQ(out.str().size(), 93UL);
out.str("");
out << jit::sgd_attr_t(1, 2, 3, 4, 5);
EXPECT_EQ(out.str().size(), 81);
EXPECT_EQ(out.str().size(), 81UL);
out.str("");
out << jit::matmul_attr_t(1, 2, 3);
EXPECT_EQ(out.str().size(), 14);
EXPECT_EQ(out.str().size(), 14UL);
}
// test keys
......
......@@ -181,7 +181,7 @@ void compare_clip(
T* ytgt_data = ytgt.data();
tgt(n, threshold, x_data, ytgt_data);
ref(n, threshold, x_data, yref_data);
for (int i = 0; i < n; ++i) {
for (size_t i = 0; i < n; ++i) {
EXPECT_NEAR(ytgt_data[i], yref_data[i], 1e-3);
}
}
......
......@@ -72,8 +72,8 @@ class PoolCUDNNOpKernel : public framework::OpKernel<T> {
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (data_dims.size() * 2 == paddings.size()) {
for (size_t i = 0; i < data_dims.size(); ++i) {
if (data_dims.size() * 2 == static_cast<int>(paddings.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings.erase(paddings.begin() + i + 1);
}
}
......@@ -205,8 +205,8 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel<T> {
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (data_dims.size() * 2 == paddings.size()) {
for (size_t i = 0; i < data_dims.size(); ++i) {
if (data_dims.size() * 2 == static_cast<int>(paddings.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings.erase(paddings.begin() + i + 1);
}
}
......
......@@ -148,8 +148,8 @@ class PoolKernel : public framework::OpKernel<T> {
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (data_dims.size() * 2 == paddings.size()) {
for (size_t i = 0; i < data_dims.size(); ++i) {
if (data_dims.size() * 2 == static_cast<int>(paddings.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings.erase(paddings.begin() + i + 1);
}
}
......@@ -234,8 +234,8 @@ class PoolGradKernel : public framework::OpKernel<T> {
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (data_dims.size() * 2 == paddings.size()) {
for (size_t i = 0; i < data_dims.size(); ++i) {
if (data_dims.size() * 2 == static_cast<int>(paddings.size())) {
for (int i = 0; i < data_dims.size(); ++i) {
paddings.erase(paddings.begin() + i + 1);
}
}
......
......@@ -164,7 +164,7 @@ class CPUPyramidHashOPKernel : public framework::OpKernel<T> {
unsigned int pos1 = XXH32(hash_id, len * sizeof(T), 0) % _space_len;
unsigned int pos2 = XXH32(hash_id, len * sizeof(T), _rand_len) % _space_len;
for (unsigned int j = 0; j != _num_emb; j += _rand_len) {
for (int j = 0; j != _num_emb; j += _rand_len) {
if (j + _rand_len < _num_emb) {
__builtin_prefetch(weights + pos2);
__builtin_prefetch(top_pos + j + _rand_len);
......@@ -204,7 +204,7 @@ class CPUPyramidHashOPKernel : public framework::OpKernel<T> {
auto* buff = ctx.Output<LoDTensor>("X_Temp_Out");
buff->Resize(framework::make_ddim({bottom->dims()[0], bottom->dims()[1]}));
T* bottom_data = buff->mutable_data<T>(ctx.GetPlace());
for (size_t i = 0; i < bottom->dims()[0]; i++) {
for (int i = 0; i < bottom->dims()[0]; i++) {
bottom_data[i] = bottom_data_ori[i];
}
......@@ -237,7 +237,7 @@ class CPUPyramidHashOPKernel : public framework::OpKernel<T> {
int* iter = drop_pos->mutable_data<int>(ctx.GetPlace());
int* iter_end = iter;
for (int i = 0; i < top_offset.size() - 1; ++i) {
for (size_t i = 0; i < top_offset.size() - 1; ++i) {
int w = offset[i + 1] - offset[i];
int nsentense_with_pyramid = 0;
if (w < 2) {
......@@ -283,7 +283,7 @@ class CPUPyramidHashOPKernel : public framework::OpKernel<T> {
iter = drop_pos->mutable_data<int>(ctx.GetPlace());
int top_counter = 0;
for (int i = 0; i < offset.size() - 1; ++i) {
for (size_t i = 0; i < offset.size() - 1; ++i) {
int w_drop = drop_pos_offset[i + 1] - drop_pos_offset[i];
int w = offset[i + 1] - offset[i];
if (w_drop == 0) {
......@@ -376,7 +376,7 @@ class CPUPyramidHashOPGradKernel : public framework::OpKernel<T> {
void hash_embedding_bp(const T* hash_id, int len, const T* top_pos,
T* weights, T mlr, int _num_emb, int _rand_len,
int _space_len) const {
for (unsigned int j = 0; j != _num_emb; j += _rand_len) {
for (int j = 0; j != _num_emb; j += _rand_len) {
unsigned int pos = XXH32(hash_id, len * sizeof(T), j) % _space_len;
avx_axpy(top_pos + j, weights + pos, _rand_len, mlr);
}
......@@ -398,7 +398,7 @@ class CPUPyramidHashOPGradKernel : public framework::OpKernel<T> {
auto* bottom_data = buff->data<T>();
int _slot_len = bottom->dims()[0];
if (_slot_len == bottom->lod()[0].size() - 1 &&
if (static_cast<size_t>(_slot_len) == bottom->lod()[0].size() - 1 &&
std::count(bottom_data, bottom_data + _slot_len, -1) == _slot_len) {
return;
}
......@@ -412,7 +412,7 @@ class CPUPyramidHashOPGradKernel : public framework::OpKernel<T> {
const int* iter = drop_pos->data<int>();
int top_counter = 0;
for (int i = 0; i < offset.size() - 1; ++i) {
for (size_t i = 0; i < offset.size() - 1; ++i) {
int w = offset[i + 1] - offset[i];
int w_drop = drop_pos_offset[i + 1] - drop_pos_offset[i];
if (w_drop == 0) {
......
......@@ -149,7 +149,7 @@ class WarpCTCKernel : public framework::OpKernel<T> {
logits_lod.push_back(0);
label_lod.push_back(0);
for (auto i = 0; i < num_sequences; i++) {
for (size_t i = 0; i < num_sequences; i++) {
logits_lod.push_back(logits_lod[i] +
logits_length_cpu.data<int64_t>()[i]);
label_lod.push_back(label_lod[i] +
......
......@@ -52,7 +52,7 @@ TEST(ENFORCE, FAILED) {
PADDLE_ENFORCE(false);
} catch (paddle::platform::EnforceNotMet& error) {
caught_exception = true;
EXPECT_NE(std::string(error.what()).find(" at "), 0);
EXPECT_NE(std::string(error.what()).find(" at "), 0UL);
}
EXPECT_TRUE(caught_exception);
}
......