diff --git a/paddle/trainer/ThreadParameterUpdater.cpp b/paddle/trainer/ThreadParameterUpdater.cpp
index 9caa92a4d7557c0c8633d881820862bbbd5df87e..049022b1f106a0bedec1e4ebc687adb316662500 100644
--- a/paddle/trainer/ThreadParameterUpdater.cpp
+++ b/paddle/trainer/ThreadParameterUpdater.cpp
@@ -55,6 +55,9 @@ void SgdThreadUpdater::init(std::vector<ParameterPtr>& parameters) {
       // not create parameter buf for PARAMETER_GRADIENT for sparse update in
       // Parameter::enableType(). But gradient parameter buf is still used
       // in SgdThreadUpdater. We need to explicitly create it.
+      //
+      // The AverageOptimizer::restore/apply method will use PARAMETER_GRADIENT
+      // as a temp buffer.
       para->enableBufType(PARAMETER_GRADIENT);
     }
   }
diff --git a/paddle/trainer/tests/CMakeLists.txt b/paddle/trainer/tests/CMakeLists.txt
index 60c129f4e2386e90e84e516170fb8e388a89c511..28c3d6f2631f9e28e3f1ff086b1e8edf994e73a4 100644
--- a/paddle/trainer/tests/CMakeLists.txt
+++ b/paddle/trainer/tests/CMakeLists.txt
@@ -27,7 +27,8 @@ add_test(NAME test_Trainer
 add_unittest_without_exec(test_TrainerOnePass
     test_TrainerOnePass.cpp)
 
 add_test(NAME test_TrainerOnePass
-    COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
+    COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d
+        ${PROJ_ROOT}/python/:${PROJ_ROOT}/paddle/trainer/tests
         ${PROJ_ROOT}/paddle/.set_port.sh -p port ${CMAKE_CURRENT_BINARY_DIR}/test_TrainerOnePass
     WORKING_DIRECTORY ${PROJ_ROOT}/paddle/)
diff --git a/paddle/trainer/tests/fake_file_list.list b/paddle/trainer/tests/fake_file_list.list
new file mode 100644
index 0000000000000000000000000000000000000000..f27ceed277f97ab9c8ea1c9b9d8475b13ccf3ddd
--- /dev/null
+++ b/paddle/trainer/tests/fake_file_list.list
@@ -0,0 +1 @@
+do_not_matter.txt
diff --git a/paddle/trainer/tests/simple_sparse_neural_network.py b/paddle/trainer/tests/simple_sparse_neural_network.py
new file mode 100644
index 0000000000000000000000000000000000000000..9604e1b9b45e571130c2f1bdc6d6a5fbd9c177c4
--- /dev/null
+++ b/paddle/trainer/tests/simple_sparse_neural_network.py
@@ -0,0 +1,23 @@
+from paddle.trainer_config_helpers import *
+
+settings(batch_size=128, learning_method=AdaGradOptimizer(), learning_rate=1e-4)
+
+file_list = 'trainer/tests/fake_file_list.list'
+
+define_py_data_sources2(
+    train_list=file_list,
+    test_list=file_list,
+    module="simple_sparse_neural_network_dp",
+    obj="process")
+
+embedding = embedding_layer(
+    input=data_layer(
+        name="word_ids", size=65536),
+    size=128,
+    param_attr=ParamAttr(sparse_update=True))
+prediction = fc_layer(input=embedding, size=10, act=SoftmaxActivation())
+
+outputs(
+    classification_cost(
+        input=prediction, label=data_layer(
+            name='label', size=10)))
diff --git a/paddle/trainer/tests/simple_sparse_neural_network_dp.py b/paddle/trainer/tests/simple_sparse_neural_network_dp.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bfd1f37e7114f2dcd0798ff1e8180b111ad988f
--- /dev/null
+++ b/paddle/trainer/tests/simple_sparse_neural_network_dp.py
@@ -0,0 +1,21 @@
+from paddle.trainer.PyDataProvider2 import provider, integer_sequence, integer_value
+import random
+
+
+def init_hook(settings, is_train, **kwargs):
+    settings.is_train = is_train
+
+
+@provider(
+    input_types={'word_ids': integer_value(65536),
+                 'label': integer_value(10)},
+    min_pool_size=0,
+    init_hook=init_hook)
+def process(settings, filename):
+    if settings.is_train:
+        data_size = 2**20
+    else:
+        data_size = 2**10
+
+    for _ in xrange(data_size):
+        yield random.randint(0, 65535), random.randint(0, 9)
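The data provider just added uses xrange, so it is Python 2 only, matching what PaddlePaddle targeted at the time. For readers on Python 3, a rough sketch of the same provider follows; the @provider decorator, the integer_value input types, and the init_hook protocol are taken from the patch itself, while the port as a whole is an untested assumption:

# Sketch: Python 3 variant of simple_sparse_neural_network_dp.py (untested).
from paddle.trainer.PyDataProvider2 import provider, integer_value
import random


def init_hook(settings, is_train, **kwargs):
    # Remember which stream (train or test) this provider instance feeds.
    settings.is_train = is_train


@provider(
    input_types={'word_ids': integer_value(65536),
                 'label': integer_value(10)},
    min_pool_size=0,
    init_hook=init_hook)
def process(settings, filename):
    # The samples are synthetic, so the file name coming from
    # fake_file_list.list is ignored; only the sample count differs
    # between the train and test streams.
    data_size = 2**20 if settings.is_train else 2**10
    for _ in range(data_size):  # range replaces Python 2's xrange
        yield random.randint(0, 65535), random.randint(0, 9)
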
diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp
index ee21008aec56da289dab88f72f57a1703e392fad..4d0174f784a0dc7314977d586c3ad1f0f9c69f6d 100644
--- a/paddle/trainer/tests/test_TrainerOnePass.cpp
+++ b/paddle/trainer/tests/test_TrainerOnePass.cpp
@@ -27,6 +27,9 @@
 static const string& configFile1 = "trainer/tests/sample_trainer_config.conf";
 static const string& configFile2 =
     "trainer/tests/sample_trainer_config_parallel.conf";
+static const string& configFileSimpleSparse =
+    "trainer/tests/simple_sparse_neural_network.py";
+
 DECLARE_bool(use_gpu);
 DECLARE_string(config);
 DECLARE_int32(gpu_id);
@@ -298,11 +301,15 @@ TEST(checkRemoteUpdater, cpuDeltaTrainerOldUpdater) {
   checkRemoteParameterUpdaterTest(configFile1, false, false, 1, true, 10);
 }
 
+TEST(SgdThreadUpdater, simpleSparseNN) {
+  trainerOnePassTest(configFileSimpleSparse, false, false, 1, 0.5, true);
+}
+
 int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
   initMain(argc, argv);
   initPython(argc, argv);
   gNumDevices = hl_get_device_count();
-  testing::InitGoogleTest(&argc, argv);
 
   FLAGS_num_passes = 1;          // train one pass
   FLAGS_saving_period = 100000;  // do not save parameteres
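Two details of the test hookup are worth noting. testing::InitGoogleTest moves to the top of main(), presumably so gtest strips its own flags from argv before initMain hands the remainder to Paddle's flag parsing. And the fixture is deliberately tiny for a sparse-update test: the embedding table is the only large parameter, and each sample activates exactly one of its 65536 rows, since word_ids is a single integer_value rather than a sequence. Back-of-envelope parameter counts, in plain Python with no Paddle dependency:

# Rough size arithmetic for the test network above (illustrative only).
vocab, emb_dim, n_classes = 65536, 128, 10

embedding_params = vocab * emb_dim           # 8,388,608 weights
fc_params = emb_dim * n_classes + n_classes  # 1,290 weights including bias

# With sparse_update=True the optimizer touches only the embedding rows whose
# word ids appear in the current batch (at most batch_size=128 of 65536 rows)
# instead of the whole 8.4M-entry table on every step.
print(embedding_params, fc_params)  # -> 8388608 1290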