From de469d58380dd4376d905165678ad05eee9e3e17 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 11 Feb 2018 10:17:23 +0800 Subject: [PATCH] optimize test --- .../tests/test_python_operator_overriding.py | 53 +++++++++++-------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py index 94f3fc958e..b9e2623bdd 100644 --- a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py +++ b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py @@ -22,44 +22,55 @@ import paddle.v2.fluid as fluid class TestPythonOperatorOverride(unittest.TestCase): - def check_result(self, fn, x_val, y_val, place, dtype): + def check_result(self, fn, place, dtype): shape = [9, 10] - x_data = np.full(shape, x_val).astype(dtype) - y_data = np.full(shape, y_val).astype(dtype) + x_data = np.random.random(size=shape).astype(dtype) + y_data = np.random.random(size=shape).astype(dtype) python_out = fn(x_data, y_data) x_var = layers.create_global_var( - shape=shape, value=x_val, dtype=dtype, persistable=True) + name='x', shape=shape, value=0.0, dtype=dtype, persistable=True) y_var = layers.create_global_var( - shape=shape, value=y_val, dtype=dtype, persistable=True) + name='y', shape=shape, value=0.0, dtype=dtype, persistable=True) out = fn(x_var, y_var) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) fluid_out = exe.run(fluid.default_main_program(), - feed=[], + feed={'x': x_data, + 'y': y_data}, fetch_list=[out]) np.testing.assert_array_equal(python_out, fluid_out[0]) def test_override(self): - cpu_place = fluid.CPUPlace() - test_data = [(lambda _a, _b: _a == _b, 0.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a == _b, 1.2, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a < _b, 0.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a < _b, 2.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a <= _b, 0.1, 1.1, 
cpu_place, 'float32'), - (lambda _a, _b: _a <= _b, 1.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a >= _b, 1.1, 1.1, cpu_place, 'float32')] - - main_program = framework.Program() - startup_program = framework.Program() - - with framework.program_guard(main_program, startup_program): - for fn, x_val, y_val, place, dtype in test_data: - self.check_result(fn, x_val, y_val, place, dtype) + # compare func to check + compare_fns = [ + lambda _a, _b: _a == _b, + lambda _a, _b: _a == _b, + lambda _a, _b: _a < _b, + lambda _a, _b: _a < _b, + lambda _a, _b: _a <= _b, + lambda _a, _b: _a <= _b, + lambda _a, _b: _a >= _b, + ] + + # places to check + places = [fluid.CPUPlace()] + if fluid.core.is_compiled_with_cuda(): + places.append(fluid.CUDAPlace(0)) + + # dtypes to check + dtypes = ['int32', 'float32'] + + for place in places: + for dtype in dtypes: + for compare_fn in compare_fns: + with framework.program_guard(framework.Program(), + framework.Program()): + self.check_result(compare_fn, place, dtype) if __name__ == '__main__': -- GitLab