diff --git a/paddle/legacy/gserver/tests/test_CompareSparse.cpp b/paddle/legacy/gserver/tests/test_CompareSparse.cpp
index 2fbc404125a9364ac44a990f8ec92962cf7d1298..c14e80036ca727264bb7e4d1cc8609961ff66a4c 100644
--- a/paddle/legacy/gserver/tests/test_CompareSparse.cpp
+++ b/paddle/legacy/gserver/tests/test_CompareSparse.cpp
@@ -22,7 +22,7 @@ limitations under the License. */
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
 
-static const string& configFile1 = "gserver/tests/sequence_lstm.conf";
+static const string& configFile1 = "legacy/gserver/tests/sequence_lstm.conf";
 
 DECLARE_bool(use_gpu);
 DECLARE_string(config);
diff --git a/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp b/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp
index 1c9b4002a34ca5a9b668be69bd0ad392eb763803..3ac86ce516afa751b5625293be901ffa81eb698a 100644
--- a/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp
+++ b/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp
@@ -40,9 +40,10 @@ DEFINE_double(
 DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_int32(seed);
 
-static const string& config_file_a = "gserver/tests/sequence_recurrent.py";
+static const string& config_file_a =
+    "legacy/gserver/tests/sequence_recurrent.py";
 static const string& config_file_b =
-    "gserver/tests/sequence_recurrent_group.py";
+    "legacy/gserver/tests/sequence_recurrent_group.py";
 
 struct ComData {
   vector<Argument> outArgs;
diff --git a/paddle/legacy/gserver/tests/test_MKLDNN.cpp b/paddle/legacy/gserver/tests/test_MKLDNN.cpp
index c1f52540a6fa851e372d64136dc769dae264a5e3..80dea89f3cac9e43b996780591e65b05ff2eca2f 100644
--- a/paddle/legacy/gserver/tests/test_MKLDNN.cpp
+++ b/paddle/legacy/gserver/tests/test_MKLDNN.cpp
@@ -426,7 +426,7 @@ DECLARE_string(config_args);
 TEST(MKLDNNNet, net) {
   std::vector<std::string> cases = {"simple", "branch"};
   for (auto name : cases) {
-    std::string config = "./gserver/tests/mkldnn_" + name + "_net.conf";
+    std::string config = "./legacy/gserver/tests/mkldnn_" + name + "_net.conf";
     for (auto channels : {2, 32}) {
       std::ostringstream oss;
       oss << "channels=" << channels;
diff --git a/paddle/legacy/gserver/tests/test_NetworkCompare.cpp b/paddle/legacy/gserver/tests/test_NetworkCompare.cpp
index fda3f2f7934adde09303f443ca5e8de6a7d077cd..5a6b2245830832c1ca60ec657231c1bc2900f158 100644
--- a/paddle/legacy/gserver/tests/test_NetworkCompare.cpp
+++ b/paddle/legacy/gserver/tests/test_NetworkCompare.cpp
@@ -220,33 +220,33 @@ void compareNetwork(const std::string& config_file_a,
 }
 
 TEST(Compare, concat_dotmul) {
-  std::string config_file_a = "./gserver/tests/concat_dotmul_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_dotmul_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_dotmul_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_dotmul_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 
 TEST(Compare, concat_fullmatrix) {
-  std::string config_file_a = "./gserver/tests/concat_fullmatrix_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_fullmatrix_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_fullmatrix_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_fullmatrix_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 
 TEST(Compare, concat_table) {
-  std::string config_file_a = "./gserver/tests/concat_table_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_table_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_table_a.conf";
"./legacy/gserver/tests/concat_table_b.conf"; compareNetwork(config_file_a, config_file_b); } TEST(Compare, concat_slice) { - std::string config_file_a = "./gserver/tests/concat_slice_a.conf"; - std::string config_file_b = "./gserver/tests/concat_slice_b.conf"; + std::string config_file_a = "./legacy/gserver/tests/concat_slice_a.conf"; + std::string config_file_b = "./legacy/gserver/tests/concat_slice_b.conf"; compareNetwork(config_file_a, config_file_b); } #ifdef PADDLE_WITH_CUDA TEST(Compare, img_pool) { - std::string config_file_a = "./gserver/tests/img_pool_a.conf"; - std::string config_file_b = "./gserver/tests/img_pool_b.conf"; + std::string config_file_a = "./legacy/gserver/tests/img_pool_a.conf"; + std::string config_file_b = "./legacy/gserver/tests/img_pool_b.conf"; bool useGpu = FLAGS_use_gpu; FLAGS_use_gpu = true; compareNetwork(config_file_a, config_file_b); @@ -254,8 +254,8 @@ TEST(Compare, img_pool) { } TEST(Compare, img_conv) { - std::string config_file_a = "./gserver/tests/img_conv_a.conf"; - std::string config_file_b = "./gserver/tests/img_conv_b.conf"; + std::string config_file_a = "./legacy/gserver/tests/img_conv_a.conf"; + std::string config_file_b = "./legacy/gserver/tests/img_conv_b.conf"; bool useGpu = FLAGS_use_gpu; FLAGS_use_gpu = true; compareNetwork(config_file_a, config_file_b); @@ -264,8 +264,8 @@ TEST(Compare, img_conv) { // Test cudnn_conv and exconv give the same result TEST(Compare, img_conv2) { - std::string config_file_a = "./gserver/tests/img_conv_cudnn.py"; - std::string config_file_b = "./gserver/tests/img_conv_exconv.py"; + std::string config_file_a = "./legacy/gserver/tests/img_conv_cudnn.py"; + std::string config_file_b = "./legacy/gserver/tests/img_conv_exconv.py"; bool useGpu = FLAGS_use_gpu; double eps = FLAGS_checkgrad_eps; FLAGS_use_gpu = true; diff --git a/paddle/legacy/gserver/tests/test_PyDataProvider.cpp b/paddle/legacy/gserver/tests/test_PyDataProvider.cpp index f956b825aaed800e1267c0eb91b0704dfb74e4d8..9cde4ecca52957a6de30bb37a497d4af162d804c 100644 --- a/paddle/legacy/gserver/tests/test_PyDataProvider.cpp +++ b/paddle/legacy/gserver/tests/test_PyDataProvider.cpp @@ -35,7 +35,8 @@ TEST(PyDataProvider, py_fill_slots) { config.set_load_data_module(std::string("pyDataProvider")); config.set_load_data_object(std::string("SimpleDataProvider")); config.clear_files(); - std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; + std::string dataFile = + "legacy/gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); #ifndef PADDLE_WITH_CUDA bool useGpu = false; @@ -68,7 +69,8 @@ TEST(PyDataProvider, py_fill_nest_slots) { config.set_load_data_module(std::string("pyDataProvider")); config.set_load_data_object(std::string("SimpleNestDataProvider")); config.clear_files(); - std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; + std::string dataFile = + "legacy/gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); EXPECT_EQ(config.IsInitialized(), true); #ifndef PADDLE_WITH_CUDA diff --git a/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp b/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp index 29b9ca41b6c8fb0007a6749ce2b98eb726c943d0..405a45b086c5870c3b0e264f4d9353752746f20e 100644 --- a/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp +++ b/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp @@ -102,11 +102,11 @@ void test(const string& conf1, const string& conf2, double eps, bool useGpu) { FLAGS_use_gpu = useGpu; int 
   int num_passes = 5;
   real* cost1 = new real[num_passes];
-  const string dir1 = "gserver/tests/t1";
+  const string dir1 = "legacy/gserver/tests/t1";
   CalCost(conf1, dir1, cost1, num_passes);
 
   real* cost2 = new real[num_passes];
-  const string dir2 = "gserver/tests/t2";
+  const string dir2 = "legacy/gserver/tests/t2";
   CalCost(conf2, dir2, cost2, num_passes);
 
   for (int i = 0; i < num_passes; i++) {
@@ -121,8 +121,8 @@ void test(const string& conf1, const string& conf2, double eps, bool useGpu) {
 
 TEST(RecurrentGradientMachine, HasSubSequence) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_layer_group.conf",
-         "gserver/tests/sequence_nest_layer_group.conf",
+    test("legacy/gserver/tests/sequence_layer_group.conf",
+         "legacy/gserver/tests/sequence_nest_layer_group.conf",
          1e-5,
          useGpu);
   }
@@ -130,8 +130,8 @@ TEST(RecurrentGradientMachine, HasSubSequence) {
 
 TEST(RecurrentGradientMachine, rnn) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn.conf",
-         "gserver/tests/sequence_nest_rnn.conf",
+    test("legacy/gserver/tests/sequence_rnn.conf",
+         "legacy/gserver/tests/sequence_nest_rnn.conf",
          1e-6,
          useGpu);
   }
@@ -139,8 +139,8 @@ TEST(RecurrentGradientMachine, rnn) {
 
 TEST(RecurrentGradientMachine, rnn_multi_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_input.conf",
-         "gserver/tests/sequence_nest_rnn_multi_input.conf",
+    test("legacy/gserver/tests/sequence_rnn_multi_input.conf",
+         "legacy/gserver/tests/sequence_nest_rnn_multi_input.conf",
          1e-6,
          useGpu);
   }
@@ -148,8 +148,8 @@ TEST(RecurrentGradientMachine, rnn_multi_input) {
 
 TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
-         "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
+    test("legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
+         "legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
          1e-6,
          useGpu);
   }
@@ -157,8 +157,8 @@ TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
 
 TEST(RecurrentGradientMachine, rnn_mixed_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_mixed_inputs.py",
-         "gserver/tests/sequence_rnn_matched_inputs.py",
+    test("legacy/gserver/tests/sequence_rnn_mixed_inputs.py",
+         "legacy/gserver/tests/sequence_rnn_matched_inputs.py",
          1e-6,
          useGpu);
   }
diff --git a/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp
index b5033d5fcb7b691608be1c4798f3048fb9a372d2..2ae051b4d733a80d5012d06069979d3fbc668eac 100644
--- a/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp
+++ b/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp
@@ -76,7 +76,7 @@ void calcOutput(ComData& comData,
   FLAGS_config = configFile;
   FLAGS_config_args = configArgs;
   FLAGS_use_gpu = useGpu;
-  FLAGS_init_model_path = "gserver/tests/SelectiveFcTest/model";
+  FLAGS_init_model_path = "legacy/gserver/tests/SelectiveFcTest/model";
   *ThreadLocalRand::getSeed() = 0;
   srand(0);
 
@@ -311,13 +311,13 @@ LayerPtr initFcLayer(LayerPtr dataLayer,
 #ifndef PADDLE_TYPE_DOUBLE
 // The parameter file used in fc.conf and selective_fc.conf is float
 TEST(Layer, SelectiveFcLayer_train_dense_mul) {
-  const string& fcConfig = "gserver/tests/SelectiveFcTest/conf/fc.conf";
+  const string& fcConfig = "legacy/gserver/tests/SelectiveFcTest/conf/fc.conf";
   const string& fcConfigArgs =
-      "filelist=gserver/tests/SelectiveFcTest/dense_mul_list";
"filelist=legacy/gserver/tests/SelectiveFcTest/dense_mul_list"; const string& selFcConfig = - "gserver/tests/SelectiveFcTest/conf/selective_fc.conf"; + "legacy/gserver/tests/SelectiveFcTest/conf/selective_fc.conf"; const string& selConfigArgs = - "filelist=gserver/tests/SelectiveFcTest/dense_mul_list"; + "filelist=legacy/gserver/tests/SelectiveFcTest/dense_mul_list"; for (auto useGpu : {false, true}) { #ifndef PADDLE_WITH_CUDA @@ -350,7 +350,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config, creatDataLayer("data", batchSize, dataLayerSize, values, useGpu); const string& selfcParaFile = - "gserver/tests/SelectiveFcTest/model/rand_fc_param.w.transpose"; + "legacy/gserver/tests/SelectiveFcTest/model/rand_fc_param.w.transpose"; const string& selfcParaName = "rand_fc_param.w.transpose"; std::shared_ptr selfcLayer = @@ -396,7 +396,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config, size_t nnz = cpuOutMatSelfc->getElementCnt(); const string& fcParaFile = - "gserver/tests/SelectiveFcTest/model/rand_fc_param.w"; + "legacy/gserver/tests/SelectiveFcTest/model/rand_fc_param.w"; const string& fcParaName = "rand_fc_param.w"; LayerConfig fcLayerConfig; fcLayerConfig.set_name("fc_layer");