From b25a9c488d3d6945157adeeac2c504ca3be977da Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Tue, 3 Apr 2018 00:16:39 -0700
Subject: [PATCH] Reduce test size to avoid GPU memory issue.

---
 .../paddle/fluid/tests/unittests/CMakeLists.txt  |  2 ++
 .../tests/unittests/test_parallel_executor.py    | 16 ++++++++--------
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 0ad273c7161..1b2d29a47fd 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -29,6 +29,7 @@ function(py_test_modules TARGET_NAME)
 endfunction()
 
 # test time consuming OPs in a separate process for expliot parallism
+list(REMOVE_ITEM TEST_OPS test_parallel_executor)
 list(REMOVE_ITEM TEST_OPS test_warpctc_op)
 list(REMOVE_ITEM TEST_OPS test_dyn_rnn)
 list(REMOVE_ITEM TEST_OPS test_mul_op)
@@ -64,6 +65,7 @@ else()
 endif(WITH_FAST_BUNDLE_TEST)
 
 # tests with high overhead
+py_test_modules(test_parallel_executor MODULES test_parallel_executor)
 py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR})
 py_test_modules(test_train_dyn_rnn MODULES test_dyn_rnn)
 py_test_modules(test_mul_op MODULES test_mul_op)
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py
index 60130298af7..f132a754a21 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor.py
@@ -135,18 +135,18 @@ def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
     return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
 
 
-def SE_ResNeXt152(batch_size=4):
+def SE_ResNeXt152Small(batch_size=2):
     img = fluid.layers.fill_constant(
         shape=[batch_size, 3, 224, 224], dtype='float32', value=0.0)
     label = fluid.layers.fill_constant(
         shape=[batch_size, 1], dtype='int64', value=0.0)
 
     conv = conv_bn_layer(
-        input=img, num_filters=64, filter_size=3, stride=2, act='relu')
+        input=img, num_filters=16, filter_size=3, stride=2, act='relu')
     conv = conv_bn_layer(
-        input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
+        input=conv, num_filters=16, filter_size=3, stride=1, act='relu')
     conv = conv_bn_layer(
-        input=conv, num_filters=128, filter_size=3, stride=1, act='relu')
+        input=conv, num_filters=16, filter_size=3, stride=1, act='relu')
 
     conv = fluid.layers.pool2d(
         input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
@@ -226,7 +226,7 @@ class TestMNIST(TestParallelExecutorBase):
     def setUpClass(cls):
         # Convert mnist to recordio file
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            reader = paddle.batch(mnist.train(), batch_size=32)
+            reader = paddle.batch(mnist.train(), batch_size=4)
             feeder = fluid.DataFeeder(
                 feed_list=[  # order is image and label
                     fluid.layers.data(
@@ -268,15 +268,15 @@ class TestResnet(TestParallelExecutorBase):
 
     def test_resnet(self):
         import functools
-        batch_size = 4
+        batch_size = 2
         self.check_network_convergence(
             functools.partial(
-                SE_ResNeXt152, batch_size=batch_size),
+                SE_ResNeXt152Small, batch_size=batch_size),
             iter=20,
             batch_size=batch_size)
         self.check_network_convergence(
             functools.partial(
-                SE_ResNeXt152, batch_size=batch_size),
+                SE_ResNeXt152Small, batch_size=batch_size),
             iter=20,
             batch_size=batch_size,
             allow_op_delay=True)
--
GitLab