From 22b9b6662b215b663ce2cebdf7624ea1212bb9c1 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Thu, 15 Dec 2016 21:01:47 +0800
Subject: [PATCH] Add unittest to cover SgdThreadUpdater's enableBufType

---
 paddle/trainer/ThreadParameterUpdater.cpp        |  3 +++
 paddle/trainer/tests/fake_file_list.list         |  1 +
 .../tests/simple_sparse_neural_network.py        | 23 +++++++++++++++++++
 .../tests/simple_sparse_neural_network_dp.py     | 21 +++++++++++++++++
 paddle/trainer/tests/test_TrainerOnePass.cpp     |  9 +++++++-
 5 files changed, 56 insertions(+), 1 deletion(-)
 create mode 100644 paddle/trainer/tests/fake_file_list.list
 create mode 100644 paddle/trainer/tests/simple_sparse_neural_network.py
 create mode 100644 paddle/trainer/tests/simple_sparse_neural_network_dp.py

diff --git a/paddle/trainer/ThreadParameterUpdater.cpp b/paddle/trainer/ThreadParameterUpdater.cpp
index 9caa92a4d75..049022b1f10 100644
--- a/paddle/trainer/ThreadParameterUpdater.cpp
+++ b/paddle/trainer/ThreadParameterUpdater.cpp
@@ -55,6 +55,9 @@ void SgdThreadUpdater::init(std::vector<ParameterPtr>& parameters) {
       // not create parameter buf for PARAMETER_GRADIENT for sparse update in
       // Parameter::enableType(). But gradient parameter buf is still used
       // in SgdThreadUpdater. We need to explicitly create it.
+      //
+      // The AverageOptimizer::restore/apply method will use PARAMETER_GRADIENT
+      // as a temp buffer.
       para->enableBufType(PARAMETER_GRADIENT);
     }
   }
diff --git a/paddle/trainer/tests/fake_file_list.list b/paddle/trainer/tests/fake_file_list.list
new file mode 100644
index 00000000000..f27ceed277f
--- /dev/null
+++ b/paddle/trainer/tests/fake_file_list.list
@@ -0,0 +1 @@
+do_not_matter.txt
diff --git a/paddle/trainer/tests/simple_sparse_neural_network.py b/paddle/trainer/tests/simple_sparse_neural_network.py
new file mode 100644
index 00000000000..9604e1b9b45
--- /dev/null
+++ b/paddle/trainer/tests/simple_sparse_neural_network.py
@@ -0,0 +1,23 @@
+from paddle.trainer_config_helpers import *
+
+settings(batch_size=128, learning_method=AdaGradOptimizer(), learning_rate=1e-4)
+
+file_list = 'trainer/tests/fake_file_list.list'
+
+define_py_data_sources2(
+    train_list=file_list,
+    test_list=file_list,
+    module="simple_sparse_neural_network_dp",
+    obj="process")
+
+embedding = embedding_layer(
+    input=data_layer(
+        name="word_ids", size=65536),
+    size=128,
+    param_attr=ParamAttr(sparse_update=True))
+prediction = fc_layer(input=embedding, size=10, act=SoftmaxActivation())
+
+outputs(
+    classification_cost(
+        input=prediction, label=data_layer(
+            name='label', size=10)))
diff --git a/paddle/trainer/tests/simple_sparse_neural_network_dp.py b/paddle/trainer/tests/simple_sparse_neural_network_dp.py
new file mode 100644
index 00000000000..8bfd1f37e71
--- /dev/null
+++ b/paddle/trainer/tests/simple_sparse_neural_network_dp.py
@@ -0,0 +1,21 @@
+from paddle.trainer.PyDataProvider2 import provider, integer_sequence, integer_value
+import random
+
+
+def init_hook(settings, is_train, **kwargs):
+    settings.is_train = is_train
+
+
+@provider(
+    input_types={'word_ids': integer_value(65536),
+                 'label': integer_value(10)},
+    min_pool_size=0,
+    init_hook=init_hook)
+def process(settings, filename):
+    if settings.is_train:
+        data_size = 2**20
+    else:
+        data_size = 2**10
+
+    for _ in xrange(data_size):
+        yield random.randint(0, 65535), random.randint(0, 9)
diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp
index ee21008aec5..4d0174f784a 100644
--- a/paddle/trainer/tests/test_TrainerOnePass.cpp
+++ b/paddle/trainer/tests/test_TrainerOnePass.cpp
@@ -27,6 +27,9 @@
 static const string& configFile1 = "trainer/tests/sample_trainer_config.conf";
 static const string& configFile2 =
     "trainer/tests/sample_trainer_config_parallel.conf";
+static const string& configFileSimpleSparse =
+    "trainer/tests/simple_sparse_neural_network.py";
+
 DECLARE_bool(use_gpu);
 DECLARE_string(config);
 DECLARE_int32(gpu_id);
@@ -298,11 +301,15 @@ TEST(checkRemoteUpdater, cpuDeltaTrainerOldUpdater) {
   checkRemoteParameterUpdaterTest(configFile1, false, false, 1, true, 10);
 }
 
+TEST(SgdThreadUpdater, simpleSparseNN) {
+  trainerOnePassTest(configFileSimpleSparse, false, false, 1, 0.5, true);
+}
+
 int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
   initMain(argc, argv);
   initPython(argc, argv);
   gNumDevices = hl_get_device_count();
-  testing::InitGoogleTest(&argc, argv);
   FLAGS_num_passes = 1;  // train one pass
   FLAGS_saving_period = 100000;  // do not save parameteres
--
GitLab
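
Note on the main() reordering in test_TrainerOnePass.cpp: testing::InitGoogleTest
parses and removes the --gtest_* options it recognizes from argc/argv, so calling
it before initMain keeps gtest-specific flags (such as --gtest_filter) from
reaching Paddle's own command-line flag parsing. A minimal, self-contained sketch
of the pattern; the initMain below is a no-op stand-in for Paddle's real
initialization entry point, not its actual implementation:

#include <gtest/gtest.h>

// Stand-in for Paddle's initMain(); assume the real one parses the
// remaining command-line flags and initializes the runtime. Defined
// as a no-op here so the sketch compiles on its own.
static void initMain(int /*argc*/, char** /*argv*/) {}

int main(int argc, char** argv) {
  // InitGoogleTest strips the --gtest_* options it consumes out of
  // argc/argv, so it must run before any other flag parser; with the
  // old ordering, an option such as --gtest_filter would still be in
  // argv when initMain ran and could be rejected as an unknown flag.
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);  // now sees only the non-gtest flags
  return RUN_ALL_TESTS();
}

With this ordering the new test can be run in isolation by passing
--gtest_filter=SgdThreadUpdater.simpleSparseNN to the test binary.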