From 5a685841317625786d4c37eb79abfd22cec995d6 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Dec 2016 07:57:04 +0000 Subject: [PATCH] Test on GPU --- demo/mnist/api_train.py | 17 +++++++---------- paddle/api/Vector.cpp | 2 +- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/demo/mnist/api_train.py b/demo/mnist/api_train.py index 129922c30b..48ba61c47d 100644 --- a/demo/mnist/api_train.py +++ b/demo/mnist/api_train.py @@ -53,7 +53,7 @@ def input_order_converter(generator): def main(): - api.initPaddle("-use_gpu=true", "-trainer_count=4") # use 4 cpu cores + api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores config = paddle.trainer.config_parser.parse_config( 'simple_mnist_network.py', '') @@ -106,7 +106,7 @@ def main(): # TrainData will stored in a data pool. Currently implementation is not care # about memory, speed. Just a very naive implementation. train_data_generator = input_order_converter(read_from_mnist(train_file)) - train_data = BatchPool(train_data_generator, 128) + train_data = BatchPool(train_data_generator, 512) # outArgs is Neural Network forward result. Here is not useful, just passed # to gradient_machine.forward @@ -126,16 +126,13 @@ def main(): # batch_evaluator can be used between start/finish. batch_evaluator.start() - # A callback when backward. - # It is used for updating weight values vy calculated Gradient. - def updater_callback(param): - updater.update(param) - # forwardBackward is a shortcut for forward and backward. # It is sometimes faster than invoke forward/backward separately, # because in GradientMachine, it may be async. - m.forwardBackward( - converter(data_batch), outArgs, pass_type, updater_callback) + m.forwardBackward(converter(data_batch), outArgs, pass_type) + + for each_param in m.getParameters(): + updater.update(each_param) # Get cost. We use numpy to calculate total cost for this batch. 
cost_vec = outArgs.getSlotValue(0) @@ -159,7 +156,7 @@ def main(): updater.apply() test_evaluator.start() test_data_generator = input_order_converter(read_from_mnist(test_file)) - for data_batch in generator_to_batch(test_data_generator, 128): + for data_batch in generator_to_batch(test_data_generator, 512): # in testing stage, only forward is needed. m.forward(converter(data_batch), outArgs, api.PASS_TEST) m.eval(test_evaluator) diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 874f2fd044..db8f005929 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -253,7 +253,7 @@ void Vector::copyToNumpyArray(float** view_m_data, int* dim1) { *view_m_data = new float[*dim1]; if (auto cpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) { std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1)); - } else if (auto gpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) { + } else if (auto gpuVec = dynamic_cast<paddle::GpuVector*>(m->vec.get())) { hl_memcpy_device2host( *view_m_data, gpuVec->getData(), sizeof(float) * (*dim1)); } else { -- GitLab