From fe765cb34e5a3970119f73472ab8cdd250924f11 Mon Sep 17 00:00:00 2001 From: Allen Guo Date: Wed, 9 Mar 2022 11:23:12 +0800 Subject: [PATCH] [IPU] update ipu unittests p1 (#39923) * update ipu UTs part1 * rename ut * sync api changes * update uts for new api * update use_ipumodel() * update use_ipumodel() * split pr --- .../unittests/ipu/test_dropout_op_ipu.py | 88 +++++----- .../unittests/ipu/test_elemetwise_x_op_ipu.py | 150 +++++++++------- .../tests/unittests/ipu/test_equal_op_ipu.py | 114 +++++++------ .../tests/unittests/ipu/test_expand_op_ipu.py | 135 ++++++++------- .../ipu/test_fill_any_like_op_ipu.py | 111 ++++++++++++ .../ipu/test_fill_constant_op_ipu.py | 68 ++++---- .../ipu/test_fp16_inference_io_ipu.py | 160 ++++++++++++++++++ .../tests/unittests/ipu/test_gather_op_ipu.py | 97 +++++------ .../tests/unittests/ipu/test_gelu_op_ipu.py | 93 +++++----- .../unittests/ipu/test_greater_op_ipu.py | 140 +++++++++++++++ .../unittests/ipu/test_groupnorm_op_ipu.py | 112 ++++++------ ...l_io.py => test_inference_model_io_ipu.py} | 71 ++++---- .../unittests/ipu/test_instancenorm_op_ipu.py | 104 ++++++------ 13 files changed, 960 insertions(+), 483 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_fp16_inference_io_ipu.py create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py rename python/paddle/fluid/tests/unittests/ipu/{test_ipu_inference_model_io.py => test_inference_model_io_ipu.py} (78%) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py index 8b1560edfd..e34da7f701 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dropout_op_ipu.py @@ -16,14 +16,9 @@ import unittest import numpy as np import paddle -import paddle.fluid as fluid -import paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) - -paddle.enable_static() +from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, + IPUOpTest) @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,81 +27,88 @@ class TestBase(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.set_feed() + self.set_data_feed() self.set_feed_attr() - self.set_attrs() + self.set_op_attrs() - def set_feed(self): - self.feed = { - "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32') - } + @property + def fp16_enabled(self): + return True + + def set_data_feed(self): + data = np.random.uniform(size=[1, 3, 10, 10]) + self.feed_fp32 = {'x': data.astype(np.float32)} + self.feed_fp16 = {'x': data.astype(np.float16)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) - def set_attrs(self): + def set_op_attrs(self): self.attrs = { "dropout_prob": 0.5, "is_test": True, "dropout_implementation": "downgrade_in_infer" } - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = 
self.SEED - main_prog.random_seed = SEED - startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + dtype='float32') + dropout = paddle.fluid.layers.dropout(x, **self.attrs) out = paddle.fluid.layers.elementwise_add(dropout, dropout) fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: program = main_prog - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 - def test_base(self): - res0 = self._test_base(True) - res1 = self._test_base(False) + result = exe.run(program, feed=feed, fetch_list=fetch_list) + return result[0] - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) + def test(self): + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten() - self.assertTrue(res0.shape == res1.shape) + self.check(output_dict) class TestCase1(TestBase): - def set_attrs(self): + def set_op_attrs(self): self.attrs = { "dropout_prob": 0.5, "is_test": True, @@ -115,7 +117,7 @@ class TestCase1(TestBase): class TestCase2(TestBase): - def set_attrs(self): + def set_op_attrs(self): self.attrs = { "dropout_prob": 0.0, "is_test": False, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py index 07b06d77c9..a9d6d23083 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_elemetwise_x_op_ipu.py @@ -16,14 +16,9 @@ import unittest import numpy as np import paddle -import paddle.fluid as fluid -import paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) - -paddle.enable_static() +from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, + IPUOpTest) @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,101 +27,136 @@ class TestMul(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.init_op() + self.set_test_op() + + @property + def fp16_enabled(self): + if IPUOpTest.use_ipumodel(): + return False + else: + return True - def init_op(self): + def set_test_op(self): self.op = paddle.fluid.layers.elementwise_mul def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - 
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] - - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = self.SEED - main_prog.random_seed = SEED - startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + dtype='float32') y = paddle.static.data( name=self.feed_list[1], shape=self.feed_shape[1], - dtype=self.feed_dtype[1]) + dtype='float32') + out = self.op(x, y, **self.attrs) - fetch_list = [out.name] + fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: program = main_prog - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 + + result = exe.run(program, feed=feed, fetch_list=fetch_list) return result[0] def run_test_base(self): - res0 = self._test_base(True) - res1 = self._test_base(False) - - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten() - self.assertTrue(res0.shape == res1.shape) + self.check(output_dict) def test_case0(self): - self.feed = { - "x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), - "y": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), + data_x = np.random.uniform(size=(2, 3, 4, 5)) + data_y = np.random.uniform(size=(2, 3, 4, 5)) + + self.feed_fp32 = { + "x": data_x.astype('float32'), + "y": data_y.astype('float32'), + } + self.feed_fp16 = { + "x": data_x.astype('float16'), + "y": data_y.astype('float16'), } self.attrs = {} self.set_feed_attr() self.run_test_base() def test_case1(self): - self.feed = { - "x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), - "y": np.random.uniform(size=(3, 4)).astype('float32'), + data_x = np.random.uniform(size=(2, 3, 4, 5)) + data_y = np.random.uniform(size=(3, 4)) + self.feed_fp32 = { + "x": data_x.astype('float32'), + "y": data_y.astype('float32'), + } + self.feed_fp16 = { + "x": data_x.astype('float16'), + "y": data_y.astype('float16'), } self.set_feed_attr() self.attrs = {"axis": 1} self.run_test_base() def test_case2(self): - self.feed = { - "x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), - "y": 
np.random.uniform(size=(5)).astype('float32'), + data_x = np.random.uniform(size=(2, 3, 4, 5)) + data_y = np.random.uniform(size=(5)) + self.feed_fp32 = { + "x": data_x.astype('float32'), + "y": data_y.astype('float32'), + } + self.feed_fp16 = { + "x": data_x.astype('float16'), + "y": data_y.astype('float16'), } self.set_feed_attr() self.attrs = {"axis": -1} self.run_test_base() def test_case3(self): - self.feed = { - "x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), - "y": np.random.uniform(size=(2)).astype('float32'), + data_x = np.random.uniform(size=(2, 3, 4, 5)) + data_y = np.random.uniform(size=(2)) + self.feed_fp32 = { + "x": data_x.astype('float32'), + "y": data_y.astype('float32'), + } + self.feed_fp16 = { + "x": data_x.astype('float16'), + "y": data_y.astype('float16'), } self.set_feed_attr() self.attrs = {"axis": 0} @@ -134,37 +164,43 @@ class TestMul(IPUOpTest): class TestAdd(TestMul): - def init_op(self): + def set_test_op(self): self.op = paddle.fluid.layers.elementwise_add class TestSub(TestMul): - def init_op(self): + def set_test_op(self): self.op = paddle.fluid.layers.elementwise_sub class TestDiv(TestMul): - def init_op(self): + def set_test_op(self): self.op = paddle.fluid.layers.elementwise_div class TestMin(TestMul): - def init_op(self): + def set_test_op(self): self.op = paddle.fluid.layers.elementwise_min class TestMax(TestMul): - def init_op(self): + def set_test_op(self): self.op = paddle.fluid.layers.elementwise_max class TestPow(TestMul): - def init_op(self): + def set_test_op(self): self.op = paddle.fluid.layers.elementwise_pow class TestMod(TestMul): - def init_op(self): + def set_atol(self): + self.atol = 1e-7 + self.rtol = 1e-5 + self.atol_fp16 = 1e-2 + self.rtol_fp16 = 1e-3 + + def set_test_op(self): self.op = paddle.fluid.layers.elementwise_mod diff --git a/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py index c319894bfa..5b18c73851 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_equal_op_ipu.py @@ -16,14 +16,8 @@ import unittest import numpy as np import paddle -import paddle.fluid as fluid -import paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) - -paddle.enable_static() +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,94 +26,106 @@ class TestBase(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.set_feed() + self.set_data_feed() self.set_feed_attr() - self.set_attrs() - - def set_feed(self): - self.feed = { - "x": np.ones([1, 10]).astype('float32'), - "y": np.zeros([1, 10]).astype('float32'), + self.set_op_attrs() + + @property + def fp16_enabled(self): + return True + + def set_data_feed(self): + x = np.ones([1, 10]) + y = np.zeros([1, 10]) + self.feed_fp32 = { + "x": x.astype(np.float32), + "y": y.astype(np.float32), + } + self.feed_fp16 = { + "x": x.astype(np.float16), + "y": y.astype(np.float16), } def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) - def set_attrs(self): + def 
set_op_attrs(self): self.attrs = {} - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = self.SEED - main_prog.random_seed = SEED - startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): - # XX x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + dtype='float32') y = paddle.static.data( name=self.feed_list[1], shape=self.feed_shape[1], - dtype=self.feed_dtype[1]) + dtype='float32') + out = paddle.fluid.layers.equal(x, y, **self.attrs) fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: program = main_prog - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 - def test_base(self): - res0 = self._test_base(True) - res1 = self._test_base(False) + result = exe.run(program, feed=feed, fetch_list=fetch_list) + return result[0] - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) + def test(self): + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten().astype(np.int32) - self.assertTrue(res0.shape == res1.shape) + self.check(output_dict) class TestCase1(TestBase): - def set_feed(self): - self.feed = { - "x": np.ones([1, 10]).astype('float32'), - "y": np.ones([1, 10]).astype('float32'), - } + def set_data_feed(self): + x = np.ones([1, 10]) + y = np.ones([1, 10]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)} class TestCase2(TestBase): - def set_feed(self): - self.feed = { - "x": np.ones([1, 10]).astype('float32'), - "y": np.arange(0, 10).reshape([1, 10]).astype('float32'), - } + def set_data_feed(self): + x = np.ones([1, 10]) + y = np.arange(0, 10).reshape([1, 10]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)} if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py index 5b7ea61568..966dfdef87 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_expand_op_ipu.py @@ -16,14 +16,8 @@ import unittest import numpy as np import paddle -import paddle.fluid as fluid -import 
paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) - -paddle.enable_static() +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,125 +26,142 @@ class TestBase(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.set_feed() + self.set_data_feed() self.set_feed_attr() - self.set_attrs() + self.set_op_attrs() + + @property + def fp16_enabled(self): + return True - def set_feed(self): - self.feed = {"x": np.random.uniform(size=[2, 3, 1]).astype('float32')} + def set_data_feed(self): + data = np.random.uniform(size=[2, 3, 1]) + self.feed_fp32 = {'in_0': data.astype(np.float32)} + self.feed_fp16 = {'in_0': data.astype(np.float16)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + self.feed_dtype = [x.dtype for x in self.feed_fp32.values()] - def set_attrs(self): + def set_op_attrs(self): self.attrs = {"expand_times": [1, 2, 2]} - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = self.SEED - main_prog.random_seed = SEED - startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + dtype="float32") + out = paddle.fluid.layers.expand(x, **self.attrs) fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: program = main_prog - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 - def test_base(self): - res0 = self._test_base(False) - res1 = self._test_base(True) + result = exe.run(program, feed=feed, fetch_list=fetch_list) + return result[0] - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) + def test(self): + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten() - self.assertTrue(res0.shape == res1.shape) + self.check(output_dict) class TestCase1(TestBase): - def set_feed(self): - self.feed = {"x": 
np.random.uniform(size=[2, 2]).astype('float32')} + def set_data_feed(self): + x = np.random.uniform(size=[2, 2]) + self.feed_fp32 = {"x": x.astype(np.float32)} + self.feed_fp16 = {"x": x.astype(np.float16)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + self.feed_dtype = [x.dtype for x in self.feed_fp32.values()] - def set_attrs(self): + def set_op_attrs(self): self.attrs = {} - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = self.SEED - main_prog.random_seed = SEED - startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) - expand_times = fluid.layers.fill_constant( + dtype="float32") + + expand_times = paddle.fluid.layers.fill_constant( shape=[len(self.feed_shape[0])], dtype="int32", value=2) out = paddle.fluid.layers.expand( x, expand_times=expand_times, **self.attrs) fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: program = main_prog - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 + + result = exe.run(program, feed=feed, fetch_list=fetch_list) return result[0] diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py new file mode 100644 index 0000000000..00b855a5a7 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py @@ -0,0 +1,111 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import paddle +import paddle.static +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode + + +@unittest.skipIf(not paddle.is_compiled_with_ipu(), + "core is not compiled with IPU") +class TestBase(IPUOpTest): + def setUp(self): + self.set_atol() + self.set_training() + self.set_data_feed() + self.set_feed_attr() + self.set_op_attrs() + + @property + def fp16_enabled(self): + return True + + def set_data_feed(self): + data = np.random.uniform(size=[2, 3, 1]) + self.feed_fp32 = {'in_0': data.astype(np.float32)} + self.feed_fp16 = {'in_0': data.astype(np.float16)} + + def set_feed_attr(self): + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + + def set_op_attrs(self): + self.attrs = {'fill_value': 0.3, 'dtype': 'float32'} + + def _test_base(self, exec_mode): + scope = paddle.static.Scope() + main_prog = paddle.static.Program() + startup_prog = paddle.static.Program() + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED + + with paddle.static.scope_guard(scope): + with paddle.static.program_guard(main_prog, startup_prog): + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype='float32') + + x_fill = paddle.full_like(x, **self.attrs) + out = paddle.fluid.layers.elementwise_add(x_fill, x_fill) + + fetch_list = [out.name] + + if exec_mode == ExecutionMode.CPU_FP32: + place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + + exe = paddle.static.Executor(place) + exe.run(startup_prog) + + if exec_mode != ExecutionMode.CPU_FP32: + feed_list = self.feed_list + ipu_strategy = paddle.static.IpuStrategy() + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( + main_prog, + ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + else: + program = main_prog + + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 + + result = exe.run(program, feed=feed, fetch_list=fetch_list) + return result[0] + + def test(self): + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten() + + self.check(output_dict) + + +class TestCase1(TestBase): + def set_op_attrs(self): + self.attrs = {'fill_value': 3, 'dtype': 'int32'} + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py index c62e0c08f9..3a1c202bf1 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_constant_op_ipu.py @@ -16,14 +16,8 @@ import unittest import numpy as np import paddle -import paddle.fluid as fluid -import paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) - -paddle.enable_static() +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,21 +26,23 @@ class TestBase(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.set_feed() + self.set_data_feed() self.set_feed_attr() - self.set_attrs() + 
self.set_op_attrs() + + @property + def fp16_enabled(self): + return True - def set_feed(self): + def set_data_feed(self): self.feed = {} def set_feed_attr(self): self.feed_shape = [x.shape for x in self.feed.values()] self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] + self.feed_dtype = [x.dtype for x in self.feed.values()] - def set_attrs(self): + def set_op_attrs(self): self.attrs = { 'name': 'x', 'shape': [1, 3, 3, 3], @@ -54,33 +50,34 @@ class TestBase(IPUOpTest): 'value': 0.3, } - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = self.SEED - main_prog.random_seed = SEED - startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.fluid.layers.fill_constant(**self.attrs) out = paddle.fluid.layers.elementwise_add(x, x) - fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: @@ -89,19 +86,18 @@ class TestBase(IPUOpTest): result = exe.run(program, feed=self.feed, fetch_list=fetch_list) return result[0] - def test_base(self): - res0 = self._test_base(False) - res1 = self._test_base(True) - - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) + def test(self): + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten() - self.assertTrue(res0.shape == res1.shape) + self.check(output_dict) class TestCase1(TestBase): - def set_attrs(self): + def set_op_attrs(self): self.attrs = { 'name': 'x', 'shape': [1, 3, 3, 3], diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fp16_inference_io_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fp16_inference_io_ipu.py new file mode 100644 index 0000000000..cd29ff705b --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ipu/test_fp16_inference_io_ipu.py @@ -0,0 +1,160 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import shutil + +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.static +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest + + +@unittest.skipIf(not paddle.is_compiled_with_ipu(), + "core is not compiled with IPU") +class TestBase(IPUOpTest): + def setUp(self): + self.set_atol() + self.set_data_feed() + self.set_feed_attr() + self.set_op_attrs() + + def set_atol(self): + self.atol = 1e-6 + self.rtol = 1e-5 + self.atol_fp16 = 1e-2 + self.rtol_fp16 = 1e-3 + + def set_data_feed(self): + data = np.random.uniform(size=[1, 3, 10, 10]) + self.feed_fp32 = {"in_0": data.astype(np.float32)} + self.feed_fp16 = {"in_0": data.astype(np.float16)} + + def set_feed_attr(self): + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + + def set_op_attrs(self): + self.attrs = {} + self.attrs['steps'] = 100 + self.attrs['save_at_step'] = 20 + self.attrs['is_training'] = True + self.attrs['opt_type'] = 'sgd' + self.attrs['path'] = 'model' + self.attrs['model_name'] = 'test' + + def _test_save(self): + scope = paddle.static.Scope() + main_prog = paddle.static.Program() + startup_prog = paddle.static.Program() + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED + generator = paddle.fluid.unique_name.UniqueNameGenerator() + self.full_name = '/'.join( + [self.attrs['path'], self.attrs['model_name']]) + + with paddle.fluid.unique_name.guard(generator): + with paddle.static.scope_guard(scope): + with paddle.static.program_guard(main_prog, startup_prog): + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype='float32') + + scale = paddle.fluid.layers.scale( + x, scale=1.0, bias=0.0, bias_after_scale=True) + conv = paddle.static.nn.conv2d( + scale, + num_filters=3, + filter_size=3, + bias_attr=False, + name='conv2d') + loss = paddle.mean(conv) + + if self.attrs['is_training']: + if self.attrs['opt_type'] == 'sgd': + sgd = paddle.optimizer.SGD(learning_rate=1e-2) + sgd.minimize(loss) + elif self.attrs['opt_type'] == 'adam': + adam = paddle.optimizer.Adam(learning_rate=1e-2) + adam.minimize(loss) + elif self.attrs['opt_type'] == 'lamb': + lamb = paddle.optimizer.Lamb(learning_rate=1e-2) + lamb.minimize(loss) + + fetch_list = [loss.name] + + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) + exe.run(startup_prog) + + ipu_strategy = paddle.static.IpuStrategy() + ipu_strategy.set_graph_config(is_training=True) + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( + main_prog, ipu_strategy=ipu_strategy).compile( + self.feed_list, fetch_list) + + for _ in range(self.attrs['steps']): + exe.run(program, feed=self.feed_fp16, fetch_list=fetch_list) + + paddle.static.save_inference_model( + self.full_name, x, loss, exe, program=program.org_program) + + def _test_load(self, run_ipu): + if run_ipu: + place = paddle.IPUPlace() + else: + place = paddle.CPUPlace() + exe = paddle.static.Executor(place) + + [inference_program, feed_target_names, fetch_targets] = ( + paddle.static.load_inference_model(self.full_name, exe)) + + if run_ipu: + feed_list = feed_target_names + fetch_list = [fetch_targets[0].name] + ipu_strategy = paddle.static.IpuStrategy() + ipu_strategy.set_graph_config(is_training=False) + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( + inference_program, + ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + else: + program = 
inference_program + + feed = self.feed_fp16 if run_ipu else self.feed_fp32 + result = [] + for i in range(10): + feed["in_0"] += np.array([1.1 * i]).astype(feed["in_0"].dtype) + out = exe.run(program, feed=feed, fetch_list=[fetch_targets]) + result.append(out) + + return np.array(result) + + def test_base(self): + self._test_save() + cpu_res = self._test_load(False) + ipu_res = self._test_load(True).astype(np.float32) + + self.assertTrue( + np.allclose( + cpu_res, ipu_res, rtol=self.rtol_fp16, atol=self.atol_fp16)) + + shutil.rmtree(self.attrs['path'], True) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py index d5be8ae0cf..01a56fd14b 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_gather_op_ipu.py @@ -16,14 +16,8 @@ import unittest import numpy as np import paddle -import paddle.fluid as fluid -import paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) - -paddle.enable_static() +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,85 +26,92 @@ class TestBase(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.set_feed() + self.set_data_feed() self.set_feed_attr() - self.set_attrs() + self.set_op_attrs() + + @property + def fp16_enabled(self): + return True - def set_feed(self): - self.feed = { - "x": np.random.uniform(size=[10, 20]).astype('float32'), - "y": np.array([1, 3, 5]).astype('int32'), - } + def set_data_feed(self): + x = np.random.uniform(size=[10, 20]) + y = np.array([1, 3, 5]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.int32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.int32)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) - def set_attrs(self): + def set_op_attrs(self): self.attrs = {} - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = self.SEED - main_prog.random_seed = SEED - startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + dtype='float32') y = paddle.static.data( name=self.feed_list[1], shape=self.feed_shape[1], - dtype=self.feed_dtype[1]) + dtype='int32') + out = paddle.fluid.layers.gather(x, index=y, **self.attrs) fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = 
paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: program = main_prog - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 - def test_base(self): - res0 = self._test_base(False) - res1 = self._test_base(True) + result = exe.run(program, feed=feed, fetch_list=fetch_list) + return result[0] - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) + def test(self): + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten() - self.assertTrue(res0.shape == res1.shape) + self.check(output_dict) class TestCase1(TestBase): - def set_feed(self): - self.feed = { - "x": np.random.uniform(size=[100]).astype('float32'), - "y": np.array([1, 3, 5]).astype('int32'), - } + def set_data_feed(self): + x = np.random.uniform(size=[100]) + y = np.array([1, 3, 5]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.int32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.int32)} if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py index ca8c0935d7..602289f3f1 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py @@ -16,14 +16,8 @@ import unittest import numpy as np import paddle -import paddle.fluid as fluid -import paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) - -paddle.enable_static() +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,80 +26,89 @@ class TestBase(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.set_feed() + self.set_data_feed() self.set_feed_attr() - self.set_attrs() + self.set_op_attrs() - def set_atol(self): - self.atol = 1e-3 + @property + def fp16_enabled(self): + return True - def set_feed(self): - self.feed = { - "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32') - } + def set_data_feed(self): + data = np.random.uniform(size=[1, 3, 10, 10]) + self.feed_fp32 = {'in_0': data.astype(np.float32)} + self.feed_fp16 = {'in_0': data.astype(np.float16)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) - def set_attrs(self): + def set_op_attrs(self): self.attrs = {"approximate": False} - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = self.SEED - main_prog.random_seed = SEED - 
startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + dtype='float32') + out = paddle.fluid.layers.gelu(x, **self.attrs) fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: program = main_prog - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 - def test_base(self): - res0 = self._test_base(False) - res1 = self._test_base(True) + result = exe.run(program, feed=feed, fetch_list=fetch_list) + return result[0] - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) + def test(self): + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten() - self.assertTrue(res0.shape == res1.shape) + self.check(output_dict) -@unittest.skip('approximate=True is not supported') class TestCase1(TestBase): - def set_attrs(self): + def set_atol(self): + self.atol = 1e-10 + self.rtol = 1e-6 + self.atol_fp16 = 2e-3 + self.rtol_fp16 = 1e-3 + + def set_op_attrs(self): self.attrs = {"approximate": True} diff --git a/python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py new file mode 100644 index 0000000000..05a37dcb3d --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py @@ -0,0 +1,140 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import paddle +import paddle.static +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode + + +@unittest.skipIf(not paddle.is_compiled_with_ipu(), + "core is not compiled with IPU") +class TestBase(IPUOpTest): + def setUp(self): + self.set_atol() + self.set_training() + self.set_data_feed() + self.set_feed_attr() + self.set_op_attrs() + + @property + def fp16_enabled(self): + return True + + def set_data_feed(self): + x = np.random.randn(3, 4, 5) + y = np.random.randn(3, 4, 5) + self.feed_fp32 = { + "x": x.astype(np.float32), + "y": y.astype(np.float32), + } + self.feed_fp16 = { + "x": x.astype(np.float16), + "y": y.astype(np.float16), + } + + def set_feed_attr(self): + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + + def set_op_attrs(self): + self.attrs = {} + + def _test_base(self, exec_mode): + scope = paddle.static.Scope() + main_prog = paddle.static.Program() + startup_prog = paddle.static.Program() + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED + + with paddle.static.scope_guard(scope): + with paddle.static.program_guard(main_prog, startup_prog): + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype='float32') + y = paddle.static.data( + name=self.feed_list[1], + shape=self.feed_shape[1], + dtype='float32') + + out = paddle.fluid.layers.greater_than(x, y, **self.attrs) + + fetch_list = [out.name] + + if exec_mode == ExecutionMode.CPU_FP32: + place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + + exe = paddle.static.Executor(place) + exe.run(startup_prog) + + if exec_mode != ExecutionMode.CPU_FP32: + feed_list = self.feed_list + ipu_strategy = paddle.static.IpuStrategy() + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( + main_prog, + ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + else: + program = main_prog + + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 + + result = exe.run(program, feed=feed, fetch_list=fetch_list) + return result[0] + + def test(self): + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + output_dict[mode] = self._test_base(mode).flatten().astype(np.int32) + + self.check(output_dict) + + +class TestCase1(TestBase): + def set_data_feed(self): + x = np.ones([1, 10]) + y = np.ones([10]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)} + + +class TestCase2(TestBase): + def set_data_feed(self): + x = np.ones([1, 10]) + y = np.zeros([1, 10]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)} + + +class TestCase3(TestBase): + def set_data_feed(self): + x = np.zeros([1, 10]) + y = np.ones([1, 10]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)} + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py index eb644c2c66..102e764cb2 100644 --- 
a/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py @@ -16,14 +16,8 @@ import unittest import numpy as np import paddle -import paddle.fluid as fluid -import paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, - np_dtype_to_fluid_str) - -paddle.enable_static() +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -32,43 +26,49 @@ class TestBase(IPUOpTest): def setUp(self): self.set_atol() self.set_training() - self.set_feed() + self.set_data_feed() self.set_feed_attr() - self.set_attrs() + self.set_op_attrs() - def set_feed(self): - self.feed = { - "x": np.random.uniform(size=[1, 8, 10, 10]).astype('float32'), - } + @property + def fp16_enabled(self): + return True + + def set_atol(self): + self.atol = 3e-6 + self.rtol = 1e-6 + self.atol_fp16 = 4e-3 + self.rtol_fp16 = 1e-3 + + def set_data_feed(self): + data = np.random.uniform(size=[1, 8, 10, 10]) + self.feed_fp32 = {'in_0': data.astype(np.float32)} + self.feed_fp16 = {'in_0': data.astype(np.float16)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() - ] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) - def set_attrs(self): + def set_op_attrs(self): self.attrs = { "groups": 8, "epsilon": 1e-05, "data_layout": 'NCHW', } - def _test_base(self, run_ipu=True): - scope = fluid.core.Scope() + def _test_base(self, exec_mode): + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() - SEED = self.SEED - main_prog.random_seed = SEED - startup_prog.random_seed = SEED + main_prog.random_seed = self.SEED + startup_prog.random_seed = self.SEED - with fluid.scope_guard(scope): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) + dtype='float32') if self.is_training: ch = self.feed_shape[0][1] @@ -78,62 +78,68 @@ class TestBase(IPUOpTest): bias = paddle.ParamAttr(trainable=True) out = paddle.fluid.layers.nn.group_norm( conv1, param_attr=scale, bias_attr=bias, **self.attrs) + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=1e-2) + adam.minimize(loss) else: - scale = True - bias = True out = paddle.fluid.layers.nn.group_norm( - x, param_attr=scale, bias_attr=bias, **self.attrs) + x, param_attr=True, bias_attr=True, **self.attrs) if self.is_training: - loss = paddle.mean(out) - adam = paddle.optimizer.Adam(learning_rate=1e-2) - adam.minimize(loss) fetch_list = [loss.name] else: fetch_list = [out.name] - if run_ipu: - place = paddle.IPUPlace() - else: + if exec_mode == ExecutionMode.CPU_FP32: place = paddle.CPUPlace() + else: + place = paddle.IPUPlace() + exe = paddle.static.Executor(place) exe.run(startup_prog) - if run_ipu: + if exec_mode != ExecutionMode.CPU_FP32: feed_list = self.feed_list ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=self.is_training) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=self.is_training) + if exec_mode == ExecutionMode.IPU_POPART_FP16: + 
ipu_strategy.set_precision_config(enable_fp16=True) + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: program = main_prog + feed = self.feed_fp32 + if exec_mode > ExecutionMode.IPU_FP32: + feed = self.feed_fp16 + if self.is_training: result = [] for _ in range(self.epoch): loss_res = exe.run(program, - feed=self.feed, + feed=feed, fetch_list=fetch_list) result.append(loss_res[0]) return np.array(result) else: - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) + result = exe.run(program, feed=feed, fetch_list=fetch_list) return result[0] def test_base(self): - res0 = self._test_base(False) - res1 = self._test_base(True) + output_dict = {} + for mode in ExecutionMode: + if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: + break + if mode > ExecutionMode.IPU_FP32 and self.is_training: + break + output_dict[mode] = self._test_base(mode).flatten() - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) - - self.assertTrue(res0.shape == res1.shape) + self.check(output_dict) class TestCase1(TestBase): - def set_attrs(self): + def set_op_attrs(self): self.attrs = { "groups": 4, "epsilon": 1e-05, @@ -147,11 +153,15 @@ class TestTrainCase1(TestBase): self.epoch = 10 +@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel") class TestTrainCase2(TestBase): def set_atol(self): - self.atol = 1e-3 + self.atol = 7e-4 + self.rtol = 1e-6 + self.atol_fp16 = 4e-3 + self.rtol_fp16 = 1e-3 - def set_attrs(self): + def set_op_attrs(self): self.attrs = { "groups": 4, "epsilon": 1e-05, @@ -163,7 +173,5 @@ class TestTrainCase2(TestBase): self.epoch = 10 -# not support `group_norm(x, param_attr=False, bias_attr=False, **self.attrs)` - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_inference_model_io.py b/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py similarity index 78% rename from python/paddle/fluid/tests/unittests/ipu/test_ipu_inference_model_io.py rename to python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py index 0a331d8045..33a63a80e3 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_ipu_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py @@ -12,59 +12,59 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import tempfile import unittest -import shutil import numpy as np import paddle -import paddle.fluid as fluid -import paddle.fluid.compiler as compiler -import paddle.optimizer import paddle.static from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest -paddle.enable_static() - @unittest.skipIf(not paddle.is_compiled_with_ipu(), "core is not compiled with IPU") class TestBase(IPUOpTest): def setUp(self): self.set_atol() - self.set_feed() - self.set_attrs() - - def set_feed(self): - self.feed_shape = [] - self.feed_shape.append([1, 3, 10, 10]) - - self.feed = {} - self.feed["in_0"] = np.random.uniform( - size=self.feed_shape[0]).astype(np.float32) - + self.set_data_feed() + self.set_feed_attr() + self.set_op_attrs() + + def set_atol(self): + self.atol = 1e-6 + self.rtol = 1e-5 + self.atol_fp16 = 1e-2 + self.rtol_fp16 = 1e-3 + + def set_data_feed(self): + data = np.random.uniform(size=[1, 3, 10, 10]) + self.feed = {"in_0": data.astype(np.float32)} + + def set_feed_attr(self): + self.feed_shape = [x.shape for x in self.feed.values()] self.feed_list = list(self.feed.keys()) - def set_attrs(self): + def set_op_attrs(self): self.attrs = {} self.attrs['steps'] = 100 self.attrs['save_at_step'] = 20 self.attrs['is_training'] = True self.attrs['opt_type'] = 'sgd' - self.attrs['path'] = 'model' + self.attrs['path'] = tempfile.TemporaryDirectory() self.attrs['model_name'] = 'test' def _test_save(self): - scope = fluid.core.Scope() + scope = paddle.static.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() main_prog.random_seed = self.SEED startup_prog.random_seed = self.SEED - generator = fluid.unique_name.UniqueNameGenerator() + generator = paddle.fluid.unique_name.UniqueNameGenerator() self.full_name = '/'.join( - [self.attrs['path'], self.attrs['model_name']]) + [self.attrs['path'].name, self.attrs['model_name']]) - with fluid.unique_name.guard(generator): - with fluid.scope_guard(scope): + with paddle.fluid.unique_name.guard(generator): + with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], @@ -88,16 +88,16 @@ class TestBase(IPUOpTest): elif self.attrs['opt_type'] == 'lamb': lamb = paddle.optimizer.Lamb(learning_rate=1e-2) lamb.minimize(loss) - fetch_list = [loss.name] + fetch_list = [loss.name] place = paddle.IPUPlace() exe = paddle.static.Executor(place) exe.run(startup_prog) ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig( + ipu_strategy.set_graph_config( is_training=self.attrs['is_training']) - program = compiler.IPUCompiledProgram( + program = paddle.static.IpuCompiledProgram( main_prog, ipu_strategy=ipu_strategy).compile( self.feed_list, fetch_list) @@ -125,8 +125,8 @@ class TestBase(IPUOpTest): feed_list = feed_target_names fetch_list = [fetch_targets[0].name] ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.SetGraphConfig(is_training=False) - program = compiler.IPUCompiledProgram( + ipu_strategy.set_graph_config(is_training=False) + program = paddle.static.IpuCompiledProgram( inference_program, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) else: @@ -134,7 +134,7 @@ class TestBase(IPUOpTest): tmp = exe.run(program, feed=self.feed, fetch_list=[fetch_targets]) - return tmp + return np.array(tmp) def test_base(self): self._test_save() @@ -142,27 +142,26 @@ class TestBase(IPUOpTest): ipu_res = self._test_load(True) self.assertTrue(np.allclose(cpu_res, ipu_res, atol=self.atol)) - - shutil.rmtree(self.attrs['path'], 
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py
index ee9cd875cf..ed8f3950ac 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py
@@ -16,14 +16,8 @@
 import unittest
 
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,39 +26,45 @@ class TestBase(IPUOpTest):
     def setUp(self):
         self.set_atol()
         self.set_training()
-        self.set_feed()
+        self.set_data_feed()
         self.set_feed_attr()
-        self.set_attrs()
+        self.set_op_attrs()
+
+    @property
+    def fp16_enabled(self):
+        return True
+
+    def set_atol(self):
+        self.atol = 1e-6
+        self.rtol = 1e-5
+        self.atol_fp16 = 1e-2
+        self.rtol_fp16 = 1e-3
 
-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
-        }
+    def set_data_feed(self):
+        x = np.random.uniform(size=[1, 3, 10, 10])
+        self.feed_fp32 = {"x": x.astype(np.float32)}
+        self.feed_fp16 = {"x": x.astype(np.float16)}
 
     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-        ]
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
 
-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {"epsilon": 1e-05}
 
-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED
 
-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
+                    dtype='float32')
 
                 if self.is_training:
                     ch = self.feed_shape[0][1]
@@ -74,58 +74,64 @@ class TestBase(IPUOpTest):
                     bias = paddle.ParamAttr(trainable=True)
                     out = paddle.fluid.layers.nn.instance_norm(
                         conv1, param_attr=scale, bias_attr=bias, **self.attrs)
+                    loss = paddle.mean(out)
+                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
+                    adam.minimize(loss)
                 else:
-                    scale = True
-                    bias = True
                     out = paddle.fluid.layers.nn.instance_norm(
-                        x, param_attr=scale, bias_attr=bias, **self.attrs)
+                        x, param_attr=True, bias_attr=True, **self.attrs)
 
                 if self.is_training:
-                    loss = paddle.mean(out)
-                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
-                    adam.minimize(loss)
                     fetch_list = [loss.name]
                 else:
                     fetch_list = [out.name]
 
-        if run_ipu:
-            place = paddle.IPUPlace()
-        else:
+        if exec_mode == ExecutionMode.CPU_FP32:
             place = paddle.CPUPlace()
+        else:
+            place = paddle.IPUPlace()
+
         exe = paddle.static.Executor(place)
         exe.run(startup_prog)
 
-        if run_ipu:
+        if exec_mode != ExecutionMode.CPU_FP32:
             feed_list = self.feed_list
             ipu_strategy = paddle.static.IpuStrategy()
-            ipu_strategy.SetGraphConfig(is_training=self.is_training)
-            program = compiler.IPUCompiledProgram(
+            ipu_strategy.set_graph_config(is_training=self.is_training)
+            if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                ipu_strategy.set_precision_config(enable_fp16=True)
+            program = paddle.static.IpuCompiledProgram(
                 main_prog,
                 ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
         else:
             program = main_prog
 
+        feed = self.feed_fp32
+        if exec_mode > ExecutionMode.IPU_FP32:
+            feed = self.feed_fp16
+
         if self.is_training:
             result = []
             for _ in range(self.epoch):
                 loss_res = exe.run(program,
-                                   feed=self.feed,
+                                   feed=feed,
                                    fetch_list=fetch_list)
                 result.append(loss_res)
             return np.array(result)
         else:
-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]
 
-    def test_base(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
+    def test(self):
+        output_dict = {}
+        for mode in ExecutionMode:
+            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+                break
+            if mode > ExecutionMode.IPU_FP32 and self.is_training:
+                break
+            output_dict[mode] = self._test_base(mode).flatten()
 
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-
-        self.assertTrue(res0.shape == res1.shape)
+        self.check(output_dict)
 
 
 class TestTrainCase1(TestBase):
@@ -134,7 +140,5 @@ class TestTrainCase1(TestBase):
         self.epoch = 10
 
 
-# not support `instance_norm(x, param_attr=False, bias_attr=False, **self.attrs)`
-
 if __name__ == "__main__":
     unittest.main()
-- 
GitLab
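
For readers following the new test flow: each updated test collects one
flattened output per ExecutionMode and hands the dict to IPUOpTest.check. A
rough sketch of what that comparison amounts to is below, under stated
assumptions: ExecutionMode is an ordered IntEnum with the three members used
above, CPU_FP32 serves as the baseline, and the fp16 run is held to the looser
atol_fp16/rtol_fp16 tolerances. The helper name `check_outputs` and its
keyword defaults are illustrative; the real implementation lives in
op_test_ipu.py.

    from enum import IntEnum

    import numpy as np


    class ExecutionMode(IntEnum):
        # ordered so that `mode > ExecutionMode.IPU_FP32` selects the fp16 run
        CPU_FP32 = 1
        IPU_FP32 = 2
        IPU_POPART_FP16 = 3


    def check_outputs(output_dict, atol=1e-6, rtol=1e-5,
                      atol_fp16=1e-2, rtol_fp16=1e-3):
        # every non-baseline mode is compared against the CPU fp32 result
        baseline = output_dict[ExecutionMode.CPU_FP32]
        for mode, out in output_dict.items():
            if mode == ExecutionMode.CPU_FP32:
                continue
            if mode == ExecutionMode.IPU_POPART_FP16:
                # fp16 results are allowed the looser tolerances
                np.testing.assert_allclose(out, baseline,
                                           rtol=rtol_fp16, atol=atol_fp16)
            else:
                np.testing.assert_allclose(out, baseline, rtol=rtol, atol=atol)

This also explains why set_atol in the tests above now defines four values
instead of one: a pair of tolerances for the fp32 comparisons and a looser
pair for the popart fp16 run.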