diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/gserver/tests/test_PyDataProvider2.cpp
index c5fe31b29187f4a5b429a928d1870a06848691fa..e75e53ab7f431a34798e8a79985f30441005098c 100644
--- a/paddle/gserver/tests/test_PyDataProvider2.cpp
+++ b/paddle/gserver/tests/test_PyDataProvider2.cpp
@@ -321,7 +321,7 @@ TEST(PyDataProvider2, input_order) {
     if (!realBatchSize) {
       break;
     }
-    ASSERT_EQ(batch.getStreams().size(), 2);
+    ASSERT_EQ(batch.getStreams().size(), (size_t)2);
     for (size_t i = 0; i < realBatchSize; ++i) {
       ASSERT_EQ(batch.getStream(0).ids->getData()[i], 0);
       ASSERT_EQ(batch.getStream(1).ids->getData()[i], 1);
diff --git a/paddle/utils/tests/test_ThreadBarrier.cpp b/paddle/utils/tests/test_ThreadBarrier.cpp
index 241cdda7bd1c90335e85c7a559afd0c84c255009..90bd6c21bc8e5ac05b248a0517f9e4fb43d04054 100644
--- a/paddle/utils/tests/test_ThreadBarrier.cpp
+++ b/paddle/utils/tests/test_ThreadBarrier.cpp
@@ -32,7 +32,7 @@ void testNormalImpl(size_t thread_num,
   std::vector<std::thread> threads;
   threads.reserve(thread_num);
-  for (int32_t i = 0; i < thread_num; ++i) {
+  for (size_t i = 0; i < thread_num; ++i) {
    threads.emplace_back([&thread_num, &mutex, &tids, &barrier, &callback]{
          callback(thread_num, mutex, tids, barrier);
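
Both hunks fix the same class of problem: comparing an unsigned size_t value against a signed int operand triggers -Wsign-compare (and fails the build under -Werror). The first hunk casts the expected constant to size_t; the second makes the loop counter size_t to match the thread_num bound. A minimal standalone sketch of the pattern follows; the function name and parameters are illustrative only and are not taken from the Paddle sources.

#include <cstddef>
#include <vector>

// Hypothetical demo, not part of the Paddle tests: shows why both fixes
// use unsigned operands on each side of the comparison.
void sign_compare_demo(const std::vector<int>& streams, size_t thread_num) {
  // streams.size() returns size_t; comparing it with a plain int literal
  // mixes signed and unsigned operands and trips -Wsign-compare. Casting
  // the constant to size_t (as the ASSERT_EQ change does) keeps both sides
  // unsigned.
  bool two_streams = (streams.size() == (size_t)2);
  (void)two_streams;

  // A size_t induction variable avoids the same warning in the loop
  // condition, mirroring the test_ThreadBarrier fix.
  for (size_t i = 0; i < thread_num; ++i) {
    // ... launch one worker per iteration ...
  }
}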