Commit 4121ad3e authored by Xin Pan

fix test paths

Parent 312f9170
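Every hunk below makes the same mechanical change: test resource paths that previously lived under gserver/tests/ are re-pointed to legacy/gserver/tests/ after the source-tree move. As a minimal sketch only (not part of this commit), such a bulk rewrite could be scripted along these lines; the scanned directory, file glob, and regex are assumptions for illustration:

# Hypothetical one-off migration script, assuming C++ test sources that
# reference config files via "gserver/tests/..." string literals.
import re
from pathlib import Path

PATTERN = re.compile(r'"(\./)?gserver/tests/')

def add_legacy_prefix(source_dir: str) -> None:
    # Prefix legacy/ onto every quoted gserver/tests/ path, preserving
    # an optional leading "./".
    for path in Path(source_dir).rglob("*.cpp"):
        text = path.read_text()
        updated = PATTERN.sub(
            lambda m: '"' + (m.group(1) or "") + "legacy/gserver/tests/", text)
        if updated != text:
            path.write_text(updated)

add_legacy_prefix("paddle/legacy/gserver/tests")  # assumed location of the tests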
@@ -22,7 +22,7 @@ limitations under the License. */
 using namespace paddle; // NOLINT
 using namespace std;    // NOLINT
-static const string& configFile1 = "gserver/tests/sequence_lstm.conf";
+static const string& configFile1 = "legacy/gserver/tests/sequence_lstm.conf";
 DECLARE_bool(use_gpu);
 DECLARE_string(config);
...
@@ -40,9 +40,10 @@ DEFINE_double(
 DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_int32(seed);
-static const string& config_file_a = "gserver/tests/sequence_recurrent.py";
+static const string& config_file_a =
+    "legacy/gserver/tests/sequence_recurrent.py";
 static const string& config_file_b =
-    "gserver/tests/sequence_recurrent_group.py";
+    "legacy/gserver/tests/sequence_recurrent_group.py";
 struct ComData {
   vector<Argument> outArgs;
...
@@ -426,7 +426,7 @@ DECLARE_string(config_args);
 TEST(MKLDNNNet, net) {
   std::vector<std::string> cases = {"simple", "branch"};
   for (auto name : cases) {
-    std::string config = "./gserver/tests/mkldnn_" + name + "_net.conf";
+    std::string config = "./legacy/gserver/tests/mkldnn_" + name + "_net.conf";
     for (auto channels : {2, 32}) {
       std::ostringstream oss;
       oss << "channels=" << channels;
...
@@ -220,33 +220,33 @@ void compareNetwork(const std::string& config_file_a,
 }
 TEST(Compare, concat_dotmul) {
-  std::string config_file_a = "./gserver/tests/concat_dotmul_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_dotmul_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_dotmul_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_dotmul_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 TEST(Compare, concat_fullmatrix) {
-  std::string config_file_a = "./gserver/tests/concat_fullmatrix_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_fullmatrix_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_fullmatrix_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_fullmatrix_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 TEST(Compare, concat_table) {
-  std::string config_file_a = "./gserver/tests/concat_table_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_table_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_table_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_table_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 TEST(Compare, concat_slice) {
-  std::string config_file_a = "./gserver/tests/concat_slice_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_slice_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_slice_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_slice_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 #ifdef PADDLE_WITH_CUDA
 TEST(Compare, img_pool) {
-  std::string config_file_a = "./gserver/tests/img_pool_a.conf";
-  std::string config_file_b = "./gserver/tests/img_pool_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/img_pool_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/img_pool_b.conf";
   bool useGpu = FLAGS_use_gpu;
   FLAGS_use_gpu = true;
   compareNetwork(config_file_a, config_file_b);
@@ -254,8 +254,8 @@ TEST(Compare, img_pool) {
 }
 TEST(Compare, img_conv) {
-  std::string config_file_a = "./gserver/tests/img_conv_a.conf";
-  std::string config_file_b = "./gserver/tests/img_conv_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/img_conv_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/img_conv_b.conf";
   bool useGpu = FLAGS_use_gpu;
   FLAGS_use_gpu = true;
   compareNetwork(config_file_a, config_file_b);
@@ -264,8 +264,8 @@ TEST(Compare, img_conv) {
 // Test cudnn_conv and exconv give the same result
 TEST(Compare, img_conv2) {
-  std::string config_file_a = "./gserver/tests/img_conv_cudnn.py";
-  std::string config_file_b = "./gserver/tests/img_conv_exconv.py";
+  std::string config_file_a = "./legacy/gserver/tests/img_conv_cudnn.py";
+  std::string config_file_b = "./legacy/gserver/tests/img_conv_exconv.py";
   bool useGpu = FLAGS_use_gpu;
   double eps = FLAGS_checkgrad_eps;
   FLAGS_use_gpu = true;
...
@@ -35,7 +35,8 @@ TEST(PyDataProvider, py_fill_slots) {
   config.set_load_data_module(std::string("pyDataProvider"));
   config.set_load_data_object(std::string("SimpleDataProvider"));
   config.clear_files();
-  std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList";
+  std::string dataFile =
+      "legacy/gserver/tests/pyDataProvider/pyDataProviderList";
   config.set_files(dataFile);
 #ifndef PADDLE_WITH_CUDA
   bool useGpu = false;
@@ -68,7 +69,8 @@ TEST(PyDataProvider, py_fill_nest_slots) {
   config.set_load_data_module(std::string("pyDataProvider"));
   config.set_load_data_object(std::string("SimpleNestDataProvider"));
   config.clear_files();
-  std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList";
+  std::string dataFile =
+      "legacy/gserver/tests/pyDataProvider/pyDataProviderList";
   config.set_files(dataFile);
   EXPECT_EQ(config.IsInitialized(), true);
 #ifndef PADDLE_WITH_CUDA
...
@@ -102,11 +102,11 @@ void test(const string& conf1, const string& conf2, double eps, bool useGpu) {
   FLAGS_use_gpu = useGpu;
   int num_passes = 5;
   real* cost1 = new real[num_passes];
-  const string dir1 = "gserver/tests/t1";
+  const string dir1 = "legacy/gserver/tests/t1";
   CalCost(conf1, dir1, cost1, num_passes);
   real* cost2 = new real[num_passes];
-  const string dir2 = "gserver/tests/t2";
+  const string dir2 = "legacy/gserver/tests/t2";
   CalCost(conf2, dir2, cost2, num_passes);
   for (int i = 0; i < num_passes; i++) {
@@ -121,8 +121,8 @@ void test(const string& conf1, const string& conf2, double eps, bool useGpu) {
 TEST(RecurrentGradientMachine, HasSubSequence) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_layer_group.conf",
-         "gserver/tests/sequence_nest_layer_group.conf",
+    test("legacy/gserver/tests/sequence_layer_group.conf",
+         "legacy/gserver/tests/sequence_nest_layer_group.conf",
          1e-5,
          useGpu);
   }
@@ -130,8 +130,8 @@ TEST(RecurrentGradientMachine, HasSubSequence) {
 TEST(RecurrentGradientMachine, rnn) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn.conf",
-         "gserver/tests/sequence_nest_rnn.conf",
+    test("legacy/gserver/tests/sequence_rnn.conf",
+         "legacy/gserver/tests/sequence_nest_rnn.conf",
          1e-6,
          useGpu);
   }
@@ -139,8 +139,8 @@ TEST(RecurrentGradientMachine, rnn) {
 TEST(RecurrentGradientMachine, rnn_multi_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_input.conf",
-         "gserver/tests/sequence_nest_rnn_multi_input.conf",
+    test("legacy/gserver/tests/sequence_rnn_multi_input.conf",
+         "legacy/gserver/tests/sequence_nest_rnn_multi_input.conf",
          1e-6,
          useGpu);
   }
@@ -148,8 +148,8 @@ TEST(RecurrentGradientMachine, rnn_multi_input) {
 TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
-         "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
+    test("legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
+         "legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
          1e-6,
          useGpu);
   }
@@ -157,8 +157,8 @@ TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
 TEST(RecurrentGradientMachine, rnn_mixed_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_mixed_inputs.py",
-         "gserver/tests/sequence_rnn_matched_inputs.py",
+    test("legacy/gserver/tests/sequence_rnn_mixed_inputs.py",
+         "legacy/gserver/tests/sequence_rnn_matched_inputs.py",
          1e-6,
          useGpu);
   }
...
@@ -76,7 +76,7 @@ void calcOutput(ComData& comData,
   FLAGS_config = configFile;
   FLAGS_config_args = configArgs;
   FLAGS_use_gpu = useGpu;
-  FLAGS_init_model_path = "gserver/tests/SelectiveFcTest/model";
+  FLAGS_init_model_path = "legacy/gserver/tests/SelectiveFcTest/model";
   *ThreadLocalRand::getSeed() = 0;
   srand(0);
@@ -311,13 +311,13 @@ LayerPtr initFcLayer(LayerPtr dataLayer,
 #ifndef PADDLE_TYPE_DOUBLE
 // The parameter file used in fc.conf and selective_fc.conf is float
 TEST(Layer, SelectiveFcLayer_train_dense_mul) {
-  const string& fcConfig = "gserver/tests/SelectiveFcTest/conf/fc.conf";
+  const string& fcConfig = "legacy/gserver/tests/SelectiveFcTest/conf/fc.conf";
   const string& fcConfigArgs =
-      "filelist=gserver/tests/SelectiveFcTest/dense_mul_list";
+      "filelist=legacy/gserver/tests/SelectiveFcTest/dense_mul_list";
   const string& selFcConfig =
-      "gserver/tests/SelectiveFcTest/conf/selective_fc.conf";
+      "legacy/gserver/tests/SelectiveFcTest/conf/selective_fc.conf";
   const string& selConfigArgs =
-      "filelist=gserver/tests/SelectiveFcTest/dense_mul_list";
+      "filelist=legacy/gserver/tests/SelectiveFcTest/dense_mul_list";
   for (auto useGpu : {false, true}) {
 #ifndef PADDLE_WITH_CUDA
@@ -350,7 +350,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config,
       creatDataLayer("data", batchSize, dataLayerSize, values, useGpu);
   const string& selfcParaFile =
-      "gserver/tests/SelectiveFcTest/model/rand_fc_param.w.transpose";
+      "legacy/gserver/tests/SelectiveFcTest/model/rand_fc_param.w.transpose";
   const string& selfcParaName = "rand_fc_param.w.transpose";
   std::shared_ptr<SelectiveFullyConnectedLayer> selfcLayer =
@@ -396,7 +396,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config,
   size_t nnz = cpuOutMatSelfc->getElementCnt();
   const string& fcParaFile =
-      "gserver/tests/SelectiveFcTest/model/rand_fc_param.w";
+      "legacy/gserver/tests/SelectiveFcTest/model/rand_fc_param.w";
   const string& fcParaName = "rand_fc_param.w";
   LayerConfig fcLayerConfig;
   fcLayerConfig.set_name("fc_layer");
...