Commit 5a685841 authored by Yu Yang

Test on GPU

Parent 65e957ca
@@ -53,7 +53,7 @@ def input_order_converter(generator):
 
 def main():
-    api.initPaddle("-use_gpu=true", "-trainer_count=4")  # use 4 cpu cores
+    api.initPaddle("-use_gpu=false", "-trainer_count=4")  # use 4 cpu cores
     config = paddle.trainer.config_parser.parse_config(
         'simple_mnist_network.py', '')
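Note: initPaddle accepts gflags-style string flags, so the CPU/GPU switch above is purely a matter of the -use_gpu flag. A minimal sketch of a guarded initialization, assuming the demo's api module is py_paddle.swig_paddle and that this Paddle build exposes isGpuVersion() (an assumption, not part of this diff):

    import py_paddle.swig_paddle as api

    # Fall back to CPU when the build has no GPU support compiled in.
    # isGpuVersion() is assumed to exist in this Paddle version.
    use_gpu = api.isGpuVersion()
    api.initPaddle("-use_gpu=" + ("true" if use_gpu else "false"),
                   "-trainer_count=4")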
@@ -106,7 +106,7 @@ def main():
     # Train data will be stored in a data pool. The current implementation
     # does not care about memory or speed; it is just a very naive one.
     train_data_generator = input_order_converter(read_from_mnist(train_file))
-    train_data = BatchPool(train_data_generator, 128)
+    train_data = BatchPool(train_data_generator, 512)
 
     # outArgs is the Neural Network forward result. It is not useful here;
     # it is just passed to gradient_machine.forward.
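Note: the training batch size fed to BatchPool grows from 128 to 512 here. The pool is, as its comment admits, naive: it drains the whole generator into memory and serves shuffled fixed-size batches. A rough sketch of such a pool, assuming the shape of the real BatchPool defined earlier in this file:

    import random

    class BatchPool(object):
        def __init__(self, generator, batch_size):
            # Naive: materialize the entire generator in memory up front.
            self.data = list(generator)
            self.batch_size = batch_size

        def __call__(self):
            # Shuffle once per pass, then yield fixed-size slices.
            random.shuffle(self.data)
            for i in range(0, len(self.data), self.batch_size):
                yield self.data[i:i + self.batch_size]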
@@ -126,16 +126,13 @@ def main():
         # batch_evaluator can be used between start/finish.
         batch_evaluator.start()
 
-        # A callback invoked during backward.
-        # It is used for updating weight values by the calculated gradient.
-        def updater_callback(param):
-            updater.update(param)
-
         # forwardBackward is a shortcut for forward and backward.
         # It is sometimes faster than invoking forward/backward separately,
         # because in GradientMachine it may be async.
-        m.forwardBackward(
-            converter(data_batch), outArgs, pass_type, updater_callback)
+        m.forwardBackward(converter(data_batch), outArgs, pass_type)
+
+        for each_param in m.getParameters():
+            updater.update(each_param)
 
         # Get cost. We use numpy to calculate the total cost for this batch.
         cost_vec = outArgs.getSlotValue(0)
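Note: this hunk drops the per-parameter backward callback in favor of an explicit update loop after forwardBackward returns, presumably because a GradientMachine may execute forward/backward asynchronously (notably on GPU), so deferring the optimizer step until the call completes is the simpler and safer ordering. The resulting inner loop, sketched with the names used in this diff:

    for data_batch in train_data():
        # Fused forward + backward; gradients are left on the parameters
        # instead of being pushed through a callback.
        m.forwardBackward(converter(data_batch), outArgs, pass_type)
        # Apply the optimizer to every parameter explicitly.
        for each_param in m.getParameters():
            updater.update(each_param)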
@@ -159,7 +156,7 @@ def main():
     updater.apply()
     test_evaluator.start()
     test_data_generator = input_order_converter(read_from_mnist(test_file))
-    for data_batch in generator_to_batch(test_data_generator, 128):
+    for data_batch in generator_to_batch(test_data_generator, 512):
         # In the testing stage, only forward is needed.
         m.forward(converter(data_batch), outArgs, api.PASS_TEST)
         m.eval(test_evaluator)
......
@@ -253,7 +253,7 @@ void Vector::copyToNumpyArray(float** view_m_data, int* dim1) {
   *view_m_data = new float[*dim1];
   if (auto cpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
     std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1));
-  } else if (auto gpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
+  } else if (auto gpuVec = dynamic_cast<paddle::GpuVector*>(m->vec.get())) {
     hl_memcpy_device2host(
         *view_m_data, gpuVec->getData(), sizeof(float) * (*dim1));
   } else {
......
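Note: this is the actual GPU fix. Before the change, both branches dynamic_cast to paddle::CpuVector, so a GpuVector could never match the second branch and always fell through to the else path, breaking copyToNumpyArray for GPU vectors. The same copy-paste bug expressed in Python terms (an analogy only; CpuVector and GpuVector below are stand-in classes, not the Paddle types):

    class CpuVector(object):       # stand-in for paddle::CpuVector
        pass

    class GpuVector(object):       # stand-in for paddle::GpuVector
        pass

    def copy_to_numpy(vec):
        if isinstance(vec, CpuVector):
            return "memcpy on host"
        elif isinstance(vec, CpuVector):   # bug: should test GpuVector
            return "hl_memcpy_device2host"
        else:
            raise TypeError("unsupported vector type")

    # copy_to_numpy(GpuVector()) hits the else branch and raises instead
    # of copying device memory, which is the failure this commit fixes.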