From 63d4d05a39c5623c9b317ffffe94e8e0e2c16bc4 Mon Sep 17 00:00:00 2001
From: Allen Guo
Date: Fri, 6 May 2022 10:28:48 +0800
Subject: [PATCH] [IPU] update UTs 0 (#42516)

* update UTs 0

* fix ci

* fix ci 3
---
 .../fluid/tests/unittests/ipu/op_test_ipu.py  | 221 ++++++++++++------
 .../unittests/ipu/test_activation_x_op_ipu.py |  71 ++----
 .../unittests/ipu/test_arg_max_op_ipu.py      |  85 ++-----
 .../tests/unittests/ipu/test_assign_op_ipu.py | 180 +++-----------
 .../tests/unittests/ipu/test_avg_shard_ipu.py |  89 ++-----
 .../unittests/ipu/test_batch_norm_op_ipu.py   |  76 ++----
 .../ipu/test_batchs_per_step_simple_ipu.py    |  87 -------
 .../tests/unittests/ipu/test_cast_op_ipu.py   | 207 ++++------------
 .../tests/unittests/ipu/test_concat_op_ipu.py |  83 ++-----
 .../tests/unittests/ipu/test_conv_op_ipu.py   |  78 ++-----
 .../ipu/test_cross_entropy2_op_ipu.py         | 103 ++------
 .../tests/unittests/ipu/test_cumsum_op_ipu.py |  68 ++----
 .../unittests/ipu/test_dropout_op_ipu.py      |  74 ++----
 .../unittests/ipu/test_elemetwise_x_op_ipu.py |  74 ++----
 .../tests/unittests/ipu/test_equal_op_ipu.py  |  77 ++----
 .../tests/unittests/ipu/test_expand_op_ipu.py | 127 ++--------
 .../ipu/test_fill_any_like_op_ipu.py          |  71 ++----
 .../ipu/test_fill_constant_op_ipu.py          |  68 ++----
 .../unittests/ipu/test_flatten_op_ipu.py      |  75 ++----
 19 files changed, 514 insertions(+), 1400 deletions(-)
 delete mode 100644 python/paddle/fluid/tests/unittests/ipu/test_batchs_per_step_simple_ipu.py

diff --git a/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py b/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
index 26fd42be6cd..2583d9409a0 100644
--- a/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
@@ -15,9 +15,10 @@
 import os
 import random
 import unittest
-import numpy as np
 from enum import IntEnum
+from typing import Dict, List, Optional
 
+import numpy as np
 import paddle
 import paddle.static
 
@@ -33,31 +34,27 @@ map_np_dtype_to_fluid_dtype = {
 }
 
 
+def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
+    return map_np_dtype_to_fluid_dtype[dtype.name]
+
+
 class ExecutionModeFull(IntEnum):
     # Run fp32 model on cpu
     CPU_FP32 = 1
     # Run fp32 model on ipu
     IPU_FP32 = 2
-    # Convert model to fp16 using popart transform
+    # Convert model to fp16 using mixed-precision approach
     # All parameters will be converted to fp16
-    # TODO rename to IPU_FP16
-    IPU_POPART_FP16 = 3
-    # Mix-precision mode, using `paddle.static.amp.fp16_guard()` to control the
-    # precision of each operator
-    IPU_MIXED_PRECISION = 4
+    IPU_FP16 = 3
 
 
 class ExecutionMode(IntEnum):
     CPU_FP32 = ExecutionModeFull.CPU_FP32
     IPU_FP32 = ExecutionModeFull.IPU_FP32
-    IPU_POPART_FP16 = ExecutionModeFull.IPU_POPART_FP16
-
+    IPU_FP16 = ExecutionModeFull.IPU_FP16
 
-def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
-    return map_np_dtype_to_fluid_dtype[dtype.name]
-
 
-class IPUOpTest(unittest.TestCase):
+class IPUTest(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         # Get random seeds
@@ -67,12 +64,7 @@ class IPUOpTest(unittest.TestCase):
         cls.SEED = 2021
         np.random.seed(cls.SEED)
         random.seed(cls.SEED)
-
-        # For ipu, most ops support fp16
-        cls.amp_list = paddle.static.amp.CustomOpLists(
-            custom_black_list=[], custom_white_list=[])
-        cls.amp_list.unsupported_list = {}
-        cls.amp_list.black_list = {}
+        paddle.seed(cls.SEED)
 
         # Enable paddle static graph mode
         paddle.enable_static()
@@ -83,6 +75,7 @@ class IPUOpTest(unittest.TestCase):
         np.random.set_state(cls._np_rand_state)
         random.setstate(cls._py_rand_state)
 
+    # Check if ipumodel mode is enabled
     @classmethod
     def use_ipumodel(cls):
         if 'POPLAR_IPUMODEL' not in os.environ:
             return False
@@ -92,6 +85,69 @@ class IPUOpTest(unittest.TestCase):
             if flag.upper() in ['1', "TRUE"]:
                 return True
 
+    # Decorator for static graph building
+    def static_graph(builder):
+        def wrapper(self, *args, **kwargs):
+            self.scope = paddle.static.Scope()
+            self.main_prog = paddle.static.Program()
+            self.startup_prog = paddle.static.Program()
+            self.main_prog.random_seed = self.SEED
+            self.startup_prog.random_seed = self.SEED
+            with paddle.static.scope_guard(self.scope):
+                with paddle.utils.unique_name.guard(
+                        paddle.utils.unique_name.generate('')):
+                    with paddle.static.program_guard(self.main_prog,
+                                                     self.startup_prog):
+                        builder(self, *args, **kwargs)
+
+        return wrapper
+
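+    # Usage sketch (illustrative only, not part of the harness): a test
+    # decorates its graph builder so each build runs in a fresh
+    # scope/program pair, e.g.
+    #
+    #   @IPUOpTest.static_graph
+    #   def build_model(self):
+    #       x = paddle.static.data(name='x', shape=[1], dtype='float32')
+    #       self.fetch_list = [x.name]
+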
+    # Cast a fp32 model to a full-fp16 model
+    @classmethod
+    def cast_model_to_fp16(cls, main_program):
+        amp_list = paddle.static.amp.CustomOpLists()
+        amp_list.unsupported_list = {}
+        to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
+            main_program, amp_list, use_fp16_guard=False)
+        paddle.static.amp.cast_parameters_to_fp16(
+            paddle.CPUPlace(),
+            main_program,
+            to_fp16_var_names=to_fp16_var_names)
+
+
+class IPUOpTest(IPUTest):
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+
+        # Items that an op_tester needs
+        cls.main_prog: paddle.static.Program = None
+        cls.startup_prog: paddle.static.Program = None
+        cls.scope: paddle.static.Scope = None
+        cls.feed_list: List[str] = None
+        cls.fetch_list: List[str] = None
+        cls.output_dict: Optional[Dict] = {}
+
+    @property
+    def fp16_enabled(self):
+        return True
+
+    def skip_mode(self, exec_mode):
+        if exec_mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+            return True
+        else:
+            return False
+
+    def is_ipu_mode(self, exec_mode):
+        if exec_mode == ExecutionMode.CPU_FP32:
+            return False
+        return True
+
+    def is_fp16_mode(self, exec_mode):
+        if exec_mode != ExecutionMode.IPU_FP16:
+            return False
+        return True
 
     def set_atol(self):
         self.atol = 1e-10
         self.rtol = 1e-6
@@ -102,55 +158,90 @@ class IPUOpTest(unittest.TestCase):
         self.is_training = False
         self.epoch = 1
 
-    def check(self, outputs, check_shape=False):
-        cpu_fp32 = outputs[ExecutionMode.CPU_FP32]
-        ipu_fp32 = outputs[ExecutionMode.IPU_FP32]
-        max_diff = np.abs(cpu_fp32 - ipu_fp32).max()
-        fp32_flag = np.allclose(
-            cpu_fp32, ipu_fp32, rtol=self.rtol, atol=self.atol)
-        self.assertTrue(fp32_flag, "max diff is %f" % (max_diff))
+    def run_op_test(self, exec_mode, ipu_strategy=None):
+        # NOTE: some op has no inputs
+        # if len(self.feed_list) == 0 or len(self.fetch_list) == 0:
+        #     raise ValueError('feed_list or fetch_list is empty')
+        if self.is_ipu_mode(exec_mode):
+            place = paddle.IPUPlace()
+        else:
+            place = paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
+        exe.run(self.startup_prog)
+        if self.is_ipu_mode(exec_mode):
+            if ipu_strategy is None:
+                ipu_strategy = paddle.static.IpuStrategy()
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+            if self.is_fp16_mode(exec_mode):
+                ipu_strategy.set_precision_config(enable_fp16=True)
+                IPUOpTest.cast_model_to_fp16(self.main_prog)
+            program = paddle.static.IpuCompiledProgram(
+                self.main_prog, ipu_strategy=ipu_strategy).compile(
+                    self.feed_list, self.fetch_list)
+        else:
+            program = self.main_prog
+
+        feed = self.feed_fp32
+        if self.is_fp16_mode(exec_mode):
+            feed = self.feed_fp16
+
+        if self.is_training:
+            result = []
+            for _ in range(self.epoch):
+                loss_res = exe.run(program,
+                                   feed=feed,
+                                   fetch_list=self.fetch_list)
+                result.append(loss_res)
+        else:
+            result = exe.run(program, feed=feed, fetch_list=self.fetch_list)
+
+        if isinstance(result, list) and len(result) == 1:
+            self.output_dict[exec_mode] = result[0]
+        else:
+            self.output_dict[exec_mode] = result
+
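+    # How a concrete test typically drives this harness (sketch only; the
+    # `build_model`/`run_model` methods are defined by each test case below):
+    #
+    #   for m in IPUOpTest.ExecutionMode:
+    #       if not self.skip_mode(m):
+    #           self.build_model()
+    #           self.run_model(m)
+    #   self.check()
+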
+    def check(self, check_shape=False, output_dict=None):
+        if output_dict is None:
+            output_dict = self.output_dict
+        if len(output_dict) == 0:
+            raise ValueError("output_dict is empty")
+        cpu_fp32 = output_dict[ExecutionMode.CPU_FP32]
+        ipu_fp32 = output_dict[ExecutionMode.IPU_FP32]
+        cpu_fp32 = np.asarray(cpu_fp32).astype(np.float32).flatten()
+        ipu_fp32 = np.asarray(ipu_fp32).astype(np.float32).flatten()
+        pass_check = np.allclose(
+            ipu_fp32, cpu_fp32, rtol=self.rtol, atol=self.atol)
+        if not pass_check:
+            max_atol = np.abs(ipu_fp32 - cpu_fp32).max()
+            cpu_fp32_abs = np.abs(cpu_fp32)
+            cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
+            max_rtol = (np.abs(ipu_fp32 - cpu_fp32) / cpu_fp32_abs).max()
+            raise AssertionError(
+                f"ipu_fp32 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
+            )
 
         if check_shape:
             self.assertTrue(cpu_fp32.shape == ipu_fp32.shape)
 
-        ipu_popart_fp16 = None
-        if ExecutionMode.IPU_POPART_FP16 in outputs.keys():
-            ipu_popart_fp16 = outputs[ExecutionMode.IPU_POPART_FP16]
-            max_diff = np.abs(ipu_popart_fp16.astype(np.float32) -
-                              cpu_fp32).max()
-            fp16_flag = np.allclose(
-                ipu_popart_fp16.astype(np.float32),
-                cpu_fp32,
-                rtol=self.rtol_fp16,
-                atol=self.atol_fp16)
-            self.assertTrue(fp16_flag, "max diff is %f" % (max_diff))
+        if ExecutionMode.IPU_FP16 in output_dict.keys():
+            ipu_fp16 = output_dict[ExecutionMode.IPU_FP16]
+            ipu_fp16 = np.asarray(ipu_fp16).astype(np.float32).flatten()
+            pass_check = np.allclose(
+                ipu_fp16, cpu_fp32, rtol=self.rtol_fp16, atol=self.atol_fp16)
+            if not pass_check:
+                max_atol = np.abs(ipu_fp16 - cpu_fp32).max()
+                cpu_fp32_abs = np.abs(cpu_fp32)
+                cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
+                max_rtol = (np.abs(ipu_fp16 - cpu_fp32) / cpu_fp32_abs).max()
+                raise AssertionError(
+                    f"ipu_fp16 check failed. 
max_atol is {max_atol}, max_rtol is {max_rtol}" + ) if check_shape: - self.assertTrue(ipu_popart_fp16.shape == cpu_fp32.shape) - - ipu_mixed_precision = None - if ExecutionModeFull.IPU_MIXED_PRECISION in outputs.keys(): - ipu_mixed_precision = outputs[ - ExecutionModeFull.IPU_MIXED_PRECISION] - max_diff = np.abs( - ipu_mixed_precision.astype(np.float32) - cpu_fp32).max() - fp16_flag = np.allclose( - ipu_mixed_precision.astype(np.float32), - cpu_fp32, - rtol=self.rtol_fp16, - atol=self.atol_fp16) - self.assertTrue(fp16_flag, "max diff is %f" % (max_diff)) - - if check_shape: - self.assertTrue(ipu_mixed_precision.shape == cpu_fp32.shape) - - if ExecutionMode.IPU_POPART_FP16 in outputs.keys( - ) and ExecutionModeFull.IPU_MIXED_PRECISION in outputs.keys(): - max_diff = np.abs(ipu_popart_fp16 - ipu_mixed_precision).max() - self.assertEqual(ipu_popart_fp16.all(), - ipu_mixed_precision.all(), - "max diff is %f" % (max_diff)) - - if check_shape: - self.assertTrue( - ipu_popart_fp16.shape == ipu_mixed_precision.shape) + self.assertTrue(ipu_fp16.shape == cpu_fp32.shape) + + # Execution Mode + class ExecutionMode(IntEnum): + CPU_FP32 = ExecutionModeFull.CPU_FP32 + IPU_FP32 = ExecutionModeFull.IPU_FP32 + IPU_FP16 = ExecutionModeFull.IPU_FP16 diff --git a/python/paddle/fluid/tests/unittests/ipu/test_activation_x_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_activation_x_op_ipu.py index 138365b650f..b90c3374db9 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_activation_x_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_activation_x_op_ipu.py @@ -18,8 +18,7 @@ import numpy as np import paddle import paddle.nn.functional as F import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, - IPUOpTest) +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,10 +31,6 @@ class TestRelu(IPUOpTest): self.set_data_feed() self.set_feed_attr() - @property - def fp16_enabled(self): - return True - def set_test_op(self): self.op = paddle.fluid.layers.relu self.op_attrs = {} @@ -49,60 +44,22 @@ class TestRelu(IPUOpTest): self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_list = list(self.feed_fp32.keys()) - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = self.op(x, **self.op_attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = self.op(x, **self.op_attrs) + self.fetch_list = [out.name] - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - 
else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + def run_model(self, exec_mode): + self.run_op_test(exec_mode) def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestTanh(TestRelu): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_arg_max_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_arg_max_op_ipu.py index d14eba98ef5..c48ce75ccd9 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_arg_max_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_arg_max_op_ipu.py @@ -17,8 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, - IPUOpTest) +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -31,12 +30,8 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): - data = np.random.uniform(size=[10, 1000]) + data = np.random.uniform(size=[10, 500]).astype(np.float16) self.feed_fp32 = {"in_0": data.astype(np.float32)} self.feed_fp16 = {"in_0": data.astype(np.float16)} @@ -48,64 +43,24 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {"axis": -1} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = paddle.fluid.layers.argmax(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0].astype(np.int32) - - def test_base(self): - output_dict_fp32 = {} - output_dict_fp16 = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - - if mode > ExecutionMode.IPU_FP32: - output_dict_fp16[mode] = self._test_base(mode).flatten() - else: - output_dict_fp32[mode] = self._test_base(mode).flatten() - - self.check(output_dict_fp32) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = 
paddle.fluid.layers.argmax(x, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + for k, v in self.output_dict.items(): + self.output_dict[k] = v.astype(np.int32) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py index 35f4ca17d5e..1239a97f2f6 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -29,10 +29,6 @@ class TestBase(IPUOpTest): self.set_data_feed() self.set_feed_attr() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[2, 3, 1]) self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -42,60 +38,23 @@ class TestBase(IPUOpTest): self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_list = list(self.feed_fp32.keys()) - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - assign = paddle.assign(x) - out = paddle.fluid.layers.elementwise_add(assign, assign) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + x = paddle.assign(x) + out = paddle.fluid.layers.elementwise_add(x, x) + self.fetch_list = [out.name] - def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() + def run_model(self, exec_mode): + self.run_op_test(exec_mode) - self.check(output_dict) + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestAssignFp32Value(TestBase): @@ -107,51 +66,13 @@ class TestAssignFp32Value(TestBase): data = np.random.uniform(size=[2, 3, 1]) self.assign_fp32 = 
data.astype(np.float32) - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - assign = paddle.assign(self.assign_fp32) - out = paddle.fluid.layers.elementwise_add(x, assign) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + assign = paddle.assign(self.assign_fp32) + out = paddle.fluid.layers.elementwise_add(x, assign) + self.fetch_list = [out.name] class TestAssignBoolValue(TestBase): @@ -162,52 +83,15 @@ class TestAssignBoolValue(TestBase): data = np.random.choice([True, False], size=(2, 3, 1)) self.assign_bool = data.astype(np.bool) - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - x = paddle.less_than(x, x) - assign = paddle.assign(self.assign_bool) - out = paddle.logical_and(x, assign) - out = paddle.cast(out, 'float32') - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + x = paddle.less_than(x, x) + assign = paddle.assign(self.assign_bool) + x = paddle.logical_and(x, assign) + out = paddle.cast(x, 'float32') + self.fetch_list = [out.name] if __name__ == "__main__": diff --git 
a/python/paddle/fluid/tests/unittests/ipu/test_avg_shard_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_avg_shard_ipu.py index f34e5b0d8b9..cf494034fd8 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_avg_shard_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_avg_shard_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -29,10 +29,6 @@ class TestBase(IPUOpTest): self.set_data_feed() self.set_feed_attr() - @property - def fp16_enabled(self): - return True - def set_atol(self): self.atol = 2e-6 self.rtol = 1e-5 @@ -48,67 +44,32 @@ class TestBase(IPUOpTest): self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_list = list(self.feed_fp32.keys()) - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - x = paddle.static.nn.conv2d( - x, num_filters=3, filter_size=3, bias_attr=False) - x = paddle.static.nn.conv2d( - x, num_filters=3, filter_size=3, bias_attr=False) - x = paddle.static.nn.conv2d( - x, num_filters=3, filter_size=3, bias_attr=False) - x = paddle.static.nn.conv2d( - x, num_filters=3, filter_size=3, bias_attr=False) - - fetch_list = [x.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - ipu_strategy.set_options({'need_avg_shard': True}) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False) + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False) + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False) + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False) + self.fetch_list = [x.name] + + def run_model(self, exec_mode): + ipu_strategy = paddle.static.IpuStrategy() + ipu_strategy.set_graph_config(is_training=self.is_training) + ipu_strategy.set_options({'need_avg_shard': True}) + self.run_op_test(exec_mode, ipu_strategy) def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + for m in 
IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py index c640cd441f1..adb2abfc474 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py @@ -17,8 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, - IPUOpTest) +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -31,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_atol(self): self.atol = 1e-6 self.rtol = 1e-5 @@ -56,61 +51,24 @@ class TestBase(IPUOpTest): self.attrs['data_layout'] = 'NCHW' self.attrs['in_place'] = False - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - conv1 = paddle.static.nn.conv2d( - x, num_filters=3, filter_size=3, bias_attr=False) - out = paddle.fluid.layers.batch_norm(conv1, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + x = paddle.static.nn.conv2d( + x, num_filters=3, filter_size=3, bias_attr=False) + x = paddle.fluid.layers.batch_norm(x, **self.attrs) + self.fetch_list = [x.name] - def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() + def run_model(self, exec_mode): + self.run_op_test(exec_mode) - self.check(output_dict) + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_batchs_per_step_simple_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_batchs_per_step_simple_ipu.py deleted file mode 100644 index ef61e651b2a..00000000000 --- a/python/paddle/fluid/tests/unittests/ipu/test_batchs_per_step_simple_ipu.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) 
2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import numpy as np -import unittest -import paddle -import paddle.static - -paddle.enable_static() -SEED = 2021 - - -@unittest.skipIf(not paddle.is_compiled_with_ipu(), - "core is not compiled with IPU") -class TestFunc(unittest.TestCase): - def _test_func(self, run_ipu=True): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = SEED - startup_prog.random_seed = SEED - np.random.seed(SEED) - - bps = 5 - n = 1 if run_ipu else -1 - c, h, w = 3, 10, 10 - np_image = np.random.uniform(size=[1 * bps, c, h, w]).astype(np.float32) - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - image = paddle.static.data( - name='image', shape=[n, c, h, w], dtype='float32') - conv2d = paddle.static.nn.conv2d( - image, num_filters=3, filter_size=3, bias_attr=False) - - out = conv2d - - if run_ipu: - place = paddle.IPUPlace() - else: - place = paddle.CPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if run_ipu: - feed_list = [image.name] - fetch_list = [out.name] - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=False) - ipu_strategy.set_pipelining_config(batches_per_step=bps) - program = paddle.static.IpuCompiledProgram( - main_prog, ipu_strategy=ipu_strategy).compile(feed_list, - fetch_list) - else: - program = main_prog - - result = exe.run(program, - feed={image.name: np_image}, - fetch_list=[out]) - return result[0] - - def test_func(self): - ipu_res = self._test_func(True) - cpu_res = self._test_func(False) - - if np.prod(ipu_res.shape) == np.prod(cpu_res.shape): - ipu_res = ipu_res.reshape(cpu_res.shape) - - self.assertTrue(np.allclose(ipu_res, cpu_res, atol=1e-4)) - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py index 2de23d95e1c..4d412f2a799 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py @@ -30,175 +30,81 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - def set_atol(self): - self.atol = 1e-3 + @property + def fp16_enabled(self): + return False def set_data_feed(self): - self.feed = { - "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'), - } + data = np.random.uniform(size=[1, 3, 3, 3]) + self.feed_fp32 = {'x': data.astype(np.float32)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [x.dtype for x in self.feed.values()] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + self.feed_dtype = [x.dtype for x in self.feed_fp32.values()] def set_op_attrs(self): 
self.attrs = {} self.attrs['dtype'] = 'float16' - def _test_base(self, run_ipu=True): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) - out = paddle.cast(x, **self.attrs) - fetch_list = [out.name] - - if run_ipu: - place = paddle.IPUPlace() - else: - place = paddle.CPUPlace() - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if run_ipu: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] - - def test_base(self): - res0 = self._test_base(True) - res1 = self._test_base(False) - - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) - - self.assertTrue(res0.shape == res1.shape) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0]) + out = paddle.cast(x, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestEnableFp16(TestBase): - def set_atol(self): - self.atol = 1e-10 + @property + def fp16_enabled(self): + return True + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) def set_data_feed(self): - self.feed = {"x": np.array([1, 200, 3000, 40000]).astype('int32'), } + data = np.random.uniform(size=[1, 3, 3, 3]) + self.feed_fp32 = {'x': data.astype(np.float32)} + self.feed_fp16 = {'x': data.astype(np.float16)} def set_op_attrs(self): self.attrs = {} self.attrs['dtype'] = 'float32' - def _test_base(self, run_ipu=True): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) - out = paddle.cast(x, **self.attrs) - fetch_list = [out.name] - - if run_ipu: - place = paddle.IPUPlace() - else: - place = paddle.CPUPlace() - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if run_ipu: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] - class TestDisableTransferCast(TestEnableFp16): - def set_atol(self): - self.atol = 1e-10 - def set_data_feed(self): - self.feed = {"x": np.array([1, 200, 3000, 40000]).astype('int32'), } + data = np.random.uniform(size=[1, 3, 3, 
3])
+        self.feed_fp32 = {'x': data.astype(np.float32)}
+        self.feed_fp16 = {'x': data.astype(np.float16)}
 
     def set_op_attrs(self):
         self.attrs = {}
         self.attrs['dtype'] = 'float32'
 
-    def _test_base(self, run_ipu=True):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
-                out = paddle.cast(x, **self.attrs)
-                fetch_list = [out.name]
-
-        if run_ipu:
-            place = paddle.IPUPlace()
-        else:
-            place = paddle.CPUPlace()
-        exe = paddle.static.Executor(place)
-        exe.run(startup_prog)
-
-        if run_ipu:
-            feed_list = self.feed_list
-            ipu_strategy = paddle.static.IpuStrategy()
-            ipu_strategy.set_graph_config(is_training=self.is_training)
-            ipu_strategy.set_precision_config(enable_fp16=True)
-            ipu_strategy.set_options({"transfer_cast_op": False})
-            program = paddle.static.IpuCompiledProgram(
-                main_prog,
-                ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-        else:
-            program = main_prog
-
-        result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
-        return result[0]
+    def run_model(self, exec_mode):
+        ipu_strategy = paddle.static.IpuStrategy()
+        ipu_strategy.set_graph_config(is_training=self.is_training)
+        ipu_strategy.set_options({"transfer_cast_op": False})
+        self.run_op_test(exec_mode, ipu_strategy)
 
 
 class TestCase2(TestBase):
-    def set_atol(self):
-        self.atol = 1e-10
-
     def set_data_feed(self):
-        self.feed = {
+        self.feed_fp32 = {
             "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
         }
 
@@ -208,11 +114,8 @@ class TestCase2(TestBase):
 
 
 class TestCase3(TestBase):
-    def set_atol(self):
-        self.atol = 1e-10
-
     def set_data_feed(self):
-        self.feed = {
+        self.feed_fp32 = {
             "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
         }
 
@@ -222,11 +125,8 @@ class TestCase3(TestBase):
 
 
 class TestCase4(TestBase):
-    def set_atol(self):
-        self.atol = 1e-10
-
     def set_data_feed(self):
-        self.feed = {
+        self.feed_fp32 = {
             "x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'),
         }
 
@@ -236,11 +136,8 @@ class TestCase4(TestBase):
 
 
 class TestCase5(TestBase):
-    def set_atol(self):
-        self.atol = 1e-10
-
     def set_data_feed(self):
-        self.feed = {
+        self.feed_fp32 = {
             "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
         }
 
@@ -250,11 +147,8 @@ class TestCase5(TestBase):
 
 
 class TestCase6(TestBase):
-    def set_atol(self):
-        self.atol = 1e-10
-
     def set_data_feed(self):
-        self.feed = {
+        self.feed_fp32 = {
             "x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'),
         }
 
@@ -273,7 +167,7 @@ class TestCase2(TestBase):
 @unittest.skip('skip float16 to float32')
 class TestCase3(TestBase):
     def set_data_feed(self):
-        self.feed = {
+        self.feed_fp32 = {
             "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
         }
 
@@ -285,10 +179,11 @@ class TestCase3(TestBase):
 @unittest.skip('int32 to int8 is not supported')
 class TestCase4(TestBase):
     def set_atol(self):
+        super().set_atol()
         self.atol = 1
 
     def set_data_feed(self):
-        self.feed = {
+        self.feed_fp32 = {
             "x": np.random.randint(
                 low=1, high=100, size=[1, 3, 3, 3]).astype('int32'),
         }
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_concat_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_concat_op_ipu.py
index c5a80902839..a5410ab4990 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_concat_op_ipu.py
+++ 
b/python/paddle/fluid/tests/unittests/ipu/test_concat_op_ipu.py @@ -17,8 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, - IPUOpTest) +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -31,14 +30,9 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data1 = np.random.uniform(size=[1, 3, 10, 10]) data2 = np.random.uniform(size=[1, 3, 10, 10]) - self.feed_fp32 = { 'x': data1.astype(np.float32), 'y': data2.astype(np.float32) @@ -55,63 +49,24 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {"axis": 0} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data( - name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - - out = paddle.fluid.layers.concat([x, y], **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] - - def test_base(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32') + out = paddle.fluid.layers.concat([x, y], **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py index ade54fda869..e450621b11d 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not 
paddle.is_compiled_with_ipu(), @@ -26,26 +26,19 @@ class TestBase(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.set_data_feed() - self.set_feed_attr() + self.set_feed() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_atol(self): self.atol = 1e-6 self.rtol = 1e-6 self.atol_fp16 = 1e-3 self.rtol_fp16 = 1e-3 - def set_data_feed(self): + def set_feed(self): data = np.random.uniform(size=[1, 3, 10, 10]) self.feed_fp32 = {'in_0': data.astype(np.float32)} self.feed_fp16 = {'in_0': data.astype(np.float16)} - - def set_feed_attr(self): self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_list = list(self.feed_fp32.keys()) @@ -59,59 +52,22 @@ class TestBase(IPUOpTest): self.attrs['groups'] = 1 self.attrs['data_format'] = 'NCHW' - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - image = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = paddle.fluid.layers.conv2d(image, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + x = paddle.fluid.layers.conv2d(x, **self.attrs) + self.fetch_list = [x.name] - def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() + def run_model(self, exec_mode): + self.run_op_test(exec_mode) - self.check(output_dict) + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py index 3a21f0cb007..d035673e219 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def 
set_data_feed(self): x = np.random.uniform(size=[3, 7]) label = np.arange(3).reshape([3, 1]) @@ -53,81 +49,31 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {'soft_label': False, } - def np_nll_loss(self): - tmp = -np.log(self.feed_fp32['x']) - label = self.feed_fp32['label'] - indice = [range(label.shape[0]), label.flatten()] - self.np_ref = tmp[indice] - - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - - if exec_mode != ExecutionMode.CPU_FP32: - label = paddle.static.data( - name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int32') - else: - label = paddle.static.data( - name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='int64') - - out = paddle.fluid.layers.cross_entropy( - input=x, label=label, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - if exec_mode != ExecutionMode.CPU_FP32: - feed['label'] = feed['label'].astype(np.int32) - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self, on_ipu): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32") + if on_ipu: + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32') + else: + label = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='int64') + out = paddle.fluid.layers.cross_entropy( + input=x, label=label, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + if self.is_ipu_mode(exec_mode): + self.feed_fp32['label'] = self.feed_fp32['label'].astype(np.int32) + self.run_op_test(exec_mode) def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - self.np_nll_loss() - - self.check(output_dict) + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model(self.is_ipu_mode(m)) + self.run_model(m) + self.check() class TestCase1(TestBase): @@ -142,7 +88,6 @@ class TestCase2(TestBase): def set_data_feed(self): x = np.random.uniform(size=[30, 70]) label = np.arange(30).reshape([30, 1]) - self.feed_fp32 = { "x": x.astype(np.float32), "label": label.astype(np.int64) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py index 2f1d86daf00..a0a145fb72b 100644 --- 
a/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -48,60 +48,22 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - - out = paddle.fluid.layers.cumsum(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32") + out = paddle.fluid.layers.cumsum(x, **self.attrs) + self.fetch_list = [out.name] - def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - - output_dict[mode] = self._test_base(mode).flatten() + def run_model(self, exec_mode): + self.run_op_test(exec_mode) - self.check(output_dict) + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py index e34da7f7016..4e3b03ffca0 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py @@ -17,8 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, - IPUOpTest) +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -31,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[1, 3, 10, 10]) self.feed_fp32 = {'x': data.astype(np.float32)} @@ -51,60 +46,23 @@ class TestBase(IPUOpTest): "dropout_implementation": "downgrade_in_infer" } - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - 
startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - dropout = paddle.fluid.layers.dropout(x, **self.attrs) - out = paddle.fluid.layers.elementwise_add(dropout, dropout) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + x = paddle.fluid.layers.dropout(x, **self.attrs) + out = paddle.fluid.layers.elementwise_add(x, x) + self.fetch_list = [out.name] - def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() + def run_model(self, exec_mode): + self.run_op_test(exec_mode) - self.check(output_dict) + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py index a9d6d230832..24082fe49ba 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py @@ -17,8 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, - IPUOpTest) +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -43,63 +42,24 @@ class TestMul(IPUOpTest): self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_list = list(self.feed_fp32.keys()) - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data( - name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - - out = self.op(x, y, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != 
ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32') + out = self.op(x, y, **self.attrs) + self.fetch_list = [out.name] - def run_test_base(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() + def run_model(self, exec_mode): + self.run_op_test(exec_mode) - self.check(output_dict) + def run_test_base(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() def test_case0(self): data_x = np.random.uniform(size=(2, 3, 4, 5)) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py index 5b18c738513..56b9a73f080 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): x = np.ones([1, 10]) y = np.zeros([1, 10]) @@ -53,63 +49,24 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data( - name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - - out = paddle.fluid.layers.equal(x, y, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > 
ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32') + out = paddle.fluid.layers.equal(x, y, **self.attrs) + self.fetch_list = [out.name] - def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten().astype(np.int32) + def run_model(self, exec_mode): + self.run_op_test(exec_mode) - self.check(output_dict) + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py index 966dfdef87b..211aa4a61a5 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[2, 3, 1]) self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -47,59 +43,22 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {"expand_times": [1, 2, 2]} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - - out = paddle.fluid.layers.expand(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32") + out = paddle.fluid.layers.expand(x, **self.attrs) + self.fetch_list = [out.name] - def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - 
output_dict[mode] = self._test_base(mode).flatten() + def run_model(self, exec_mode): + self.run_op_test(exec_mode) - self.check(output_dict) + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): @@ -116,53 +75,15 @@ class TestCase1(TestBase): def set_op_attrs(self): self.attrs = {} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype="float32") - - expand_times = paddle.fluid.layers.fill_constant( - shape=[len(self.feed_shape[0])], dtype="int32", value=2) - out = paddle.fluid.layers.expand( - x, expand_times=expand_times, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32") + expand_times = paddle.fluid.layers.fill_constant( + shape=[len(self.feed_shape[0])], dtype="int32", value=2) + out = paddle.fluid.layers.expand( + x, expand_times=expand_times, **self.attrs) + self.fetch_list = [out.name] if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py index 00b855a5a7a..b3faabda3cd 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[2, 3, 1]) self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -46,60 +42,23 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {'fill_value': 0.3, 'dtype': 'float32'} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, 
startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - x_fill = paddle.full_like(x, **self.attrs) - out = paddle.fluid.layers.elementwise_add(x_fill, x_fill) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + x_fill = paddle.full_like(x, **self.attrs) + out = paddle.fluid.layers.elementwise_add(x_fill, x_fill) + self.fetch_list = [out.name] - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + def run_model(self, exec_mode): + self.run_op_test(exec_mode) def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py index 3a1c202bf11..ce457b7abeb 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,17 +30,14 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): - self.feed = {} + self.feed_fp32 = {} + self.feed_fp16 = {} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [x.dtype for x in self.feed.values()] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + self.feed_dtype = [x.dtype for x in self.feed_fp32.values()] def set_op_attrs(self): self.attrs = { @@ -50,50 +47,21 @@ class TestBase(IPUOpTest): 'value': 0.3, } - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.fluid.layers.fill_constant(**self.attrs) - out = paddle.fluid.layers.elementwise_add(x, x) - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - 
place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() + @IPUOpTest.static_graph + def build_model(self): + x = paddle.fluid.layers.fill_constant(**self.attrs) + out = paddle.fluid.layers.elementwise_add(x, x) + self.fetch_list = [out.name] - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] + def run_model(self, exec_mode): + self.run_op_test(exec_mode) def test(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py index 6f0cafc6680..a8d530f6b77 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[2, 2, 4, 6]) self.feed_fp32 = {"in_0": data.astype(np.float32)} @@ -47,59 +43,22 @@ class TestBase(IPUOpTest): self.attrs = {} self.attrs['axis'] = 1 - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = paddle.fluid.layers.flatten(x=x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] - - def test_base(self): - output_dict = {} - for mode 
in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode) - - self.check(output_dict, check_shape=True) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = paddle.fluid.layers.flatten(x=x, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): -- GitLab
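
Editor's note: every file touched by this patch converges on the same
three-method pattern (build_model / run_model / test), with the shared
IPUOpTest helpers doing the work each per-file _test_base used to repeat.
The sketch below is a minimal, self-contained illustration of that pattern,
not part of the patch itself: the class name TestReluSketch, the choice of
paddle.fluid.layers.relu as the op under test, and the feed shape are
hypothetical, and it assumes the helpers (static_graph, run_op_test,
skip_mode, check) behave as defined by op_test_ipu.py in this patch.

import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestReluSketch(IPUOpTest):  # hypothetical example class
    def setUp(self):
        self.set_atol()
        self.set_training()
        # Keep fp32 and fp16 copies of every input; run_op_test picks
        # the feed that matches the current execution mode.
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {'x': data.astype(np.float32)}
        self.feed_fp16 = {'x': data.astype(np.float16)}
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    @IPUOpTest.static_graph
    def build_model(self):
        # The decorator supplies a fresh scope/program pair, so the
        # graph is rebuilt from scratch for every execution mode.
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0],
            dtype='float32')
        out = paddle.fluid.layers.relu(x)  # hypothetical op under test
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        # Device placement, fp16 casting and IPU compilation now live
        # in the shared run_op_test helper instead of each test file.
        self.run_op_test(exec_mode)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        # check() compares the outputs recorded for all modes against
        # the CPU_FP32 reference, so it runs once, after the loop.
        self.check()


if __name__ == "__main__":
    unittest.main()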