Commit 54a2b1f6 authored by hedaoyuan, committed by GitHub

Merge pull request #1003 from hedaoyuan/remove_main

Remove the main function inside the test file
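The per-file main() functions removed in this diff were all near-duplicates: initialize GoogleTest, initialize Paddle, run all tests. This change deletes those copies and instead links every unit test against a single shared entry point, the new paddle_test_main library built from TestMain.cpp (see the CMake changes below). TestMain.cpp itself is not shown in this diff; the following is a minimal sketch of what it presumably contains, reconstructed from the removed main() functions (the include path and the exact call order are assumptions).

// Hypothetical reconstruction of TestMain.cpp (file contents not shown in this diff).
#include <gtest/gtest.h>

#include "paddle/utils/Util.h"  // assumed location of paddle::initMain

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);  // let gtest consume its own flags first
  paddle::initMain(argc, argv);          // Paddle-wide init (gflags, glog, devices)
  return RUN_ALL_TESTS();
}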
@@ -107,7 +107,6 @@ function(link_paddle_exe TARGET_NAME)
paddle_parameter
paddle_proto
paddle_cuda
paddle_test_main
${METRIC_LIBS}
${PROTOBUF_LIBRARY}
${LIBGLOG_LIBRARY}
@@ -155,8 +154,9 @@ endfunction()
# Rest Arguemnts: not used.
function(link_paddle_test TARGET_NAME)
link_paddle_exe(${TARGET_NAME})
target_link_libraries(${TARGET_NAME} ${GTEST_MAIN_LIBRARIES}
${GTEST_LIBRARIES})
target_link_libraries(${TARGET_NAME}
paddle_test_main
${GTEST_LIBRARIES})
endfunction()
# add_unittest_without_exec
......
add_subdirectory(cuda)
add_subdirectory(function)
add_subdirectory(utils)
add_subdirectory(testing)
add_subdirectory(math)
add_subdirectory(parameter)
add_subdirectory(gserver)
......
@@ -11,14 +11,14 @@ endif()
add_library(paddle_function STATIC ${cpp_files} ${cu_objs})
add_library(paddle_test_main STATIC TestMain.cpp)
if(WITH_GPU)
if(WITH_TESTING)
# TODO:
# file(GLOB test_files . *OpTest.cpp)
# add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files})
add_simple_unittest(CrossMapNormalOpTest)
endif()
endif()
add_style_check_target(paddle_function ${h_files})
add_style_check_target(paddle_function ${cpp_files})
......
@@ -65,9 +65,3 @@ TEST(LinearChainCRF, decoding) {
}
}
}
int main(int argc, char** argv) {
  initMain(argc, argv);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@@ -730,9 +730,3 @@ TEST(ProtoSequenceDataProvider, test) {
} // end for (int numIdSlots : numSlotsArray)
} // end for (int numSparseNonValueVecSlots : numSlotsArray)
}
int main(int argc, char** argv) {
  initMain(argc, argv);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@@ -242,9 +242,3 @@ TEST(Layer, WarpCTCLayer) {
}
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  return RUN_ALL_TESTS();
}
@@ -120,9 +120,3 @@ TEST(MemoryHandle, Gpu) {
}
}
#endif
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  return RUN_ALL_TESTS();
}
@@ -242,10 +242,4 @@ TEST(BaseMatrix, Other) {
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}
#endif
@@ -77,11 +77,4 @@ TEST(CpuGpuVector, subCreate) {
checkDataEqual(v1Check->getData() + offset, v2Check->getData(), size2);
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  int ret = RUN_ALL_TESTS();
  return ret;
}
#endif
@@ -114,9 +114,3 @@ TEST(ExecViaCpu, test1) {
testWrapper(functor);
}
#endif
int main(int argc, char** argv) {
  paddle::initMain(argc, argv);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@@ -291,10 +291,4 @@ TEST(Matrix, multiBinaryCrossEntropy) {
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}
#endif
@@ -169,9 +169,3 @@ TEST(SIMDFunction, decayL1_WithoutLR) {
ASSERT_NEAR(dest[i], simd_dest[i], EPSILON);
}
}
int main(int argc, char** argv) {
  paddle::initMain(argc, argv);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@@ -561,9 +561,3 @@ TEST(Matrix, SparseMatrixCSCFormatTrimFrom) {
checkSMatrixEqual2(matA, matD);
#endif
}
int main(int argc, char** argv) {
  paddle::initMain(argc, argv);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@@ -1163,11 +1163,3 @@ TEST(Quaternary, CompareOp) {
TestQuaternaryMatrix<GpuMatrix> testGpu(testQuaternaryCompareOp<GpuMatrix>);
#endif
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  hl_start();
  hl_init(0);
  return RUN_ALL_TESTS();
}
@@ -459,11 +459,3 @@ void testSparseMomentum(size_t size, bool useGpu) {
}
TEST(Training, SparseMomentum) { testCase(testSparseMomentum); }
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  hl_start();
  hl_init(FLAGS_gpu_id);
  return RUN_ALL_TESTS();
}
@@ -53,9 +53,3 @@ TEST(MatrixBatchTransTest, test_batch_matrix_transpose) {
checkMatrixEqual(cBatchTransMat, cMat_d2h);
}
#endif
int main(int argc, char** argv) {
  paddle::initMain(argc, argv);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@@ -139,11 +139,3 @@ TEST(sgdUpdate, GPU) {
testMatrixCase(testSgdUpdate<GpuMatrix>);
}
#endif
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  hl_start();
  hl_init(0);
  return RUN_ALL_TESTS();
}
@@ -1262,10 +1262,4 @@ TEST(Matrix, MaxOutFwdBwd) {
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  return RUN_ALL_TESTS();
}
#endif
@@ -171,11 +171,4 @@ TEST(SMatrix, sMatrixCollectBias) {
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  int ret = RUN_ALL_TESTS();
  return ret;
}
#endif
@@ -23,15 +23,6 @@ limitations under the License. */
using namespace paddle; // NOLINT
int main(int argc, char** argv) {
  paddle::initMain(argc, argv);
  testing::InitGoogleTest(&argc, argv);
  int ret = RUN_ALL_TESTS();
  return ret;
}
class CommonTest : public ::testing::Test {
protected:
CommonTest() : testStat_("test") {}
......
# for paddle test case
if(WITH_TESTING)
add_library(paddle_test_main STATIC TestMain.cpp)
add_dependencies(paddle_test_main gen_proto_cpp)
endif()
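With paddle_test_main supplying the entry point and link_paddle_test linking it in, a test source file now only needs its TEST cases. A hypothetical example (not part of this diff) of what such a file reduces to:

// Hypothetical test file after this change: no main() is defined here;
// the entry point comes from paddle_test_main at link time via link_paddle_test.
#include <gtest/gtest.h>

TEST(Example, Addition) {
  EXPECT_EQ(4, 2 + 2);  // placeholder assertion; real tests exercise Paddle APIs
}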
@@ -96,9 +96,3 @@ TEST(CustomStackTrace, normalTest) {
}
});
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}
@@ -44,8 +44,3 @@ TEST(SIMDFlags, normalPrint) {
LOG(INFO) << "Has AVX2: " << std::boolalpha << HAS_AVX2;
LOG(INFO) << "Has AVX512: " << std::boolalpha << HAS_AVX512;
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@@ -53,9 +53,3 @@ TEST(ThreadSpinLock, normalTest) {
});
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}
@@ -79,8 +79,3 @@ TEST(AsyncThreadPool, addBatchJobWithResults) {
ASSERT_EQ(res[i], i);
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@@ -64,9 +64,3 @@ TEST(ThreadBarrier, normalTest) {
});
}
}
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}