From 001dab0b9a409ad5eb6521fb075e99fd3b7d6137 Mon Sep 17 00:00:00 2001 From: Allen Guo Date: Fri, 6 May 2022 10:31:12 +0800 Subject: [PATCH] update UTs 2 (#42518) --- .../tests/unittests/ipu/test_mean_op_ipu.py | 75 ++---- .../ipu/test_mixed_precision_inference_ipu.py | 204 +++++++++------- .../ipu/test_mixed_precision_training_ipu.py | 226 +++++++++++------- .../tests/unittests/ipu/test_mul_op_ipu.py | 81 ++----- .../unittests/ipu/test_not_equal_op_ipu.py | 130 ++++++++++ .../unittests/ipu/test_one_hot_op_ipu.py | 82 ++----- .../unittests/ipu/test_one_hot_v2_op_ipu.py | 82 ++----- .../tests/unittests/ipu/test_optimizer_ipu.py | 2 - .../unittests/ipu/test_pool_avg_op_ipu.py | 91 +++---- .../unittests/ipu/test_pool_max_op_ipu.py | 91 +++---- .../tests/unittests/ipu/test_pow_op_ipu.py | 131 ++-------- .../tests/unittests/ipu/test_print_op_ipu.py | 100 +++----- .../unittests/ipu/test_reduce_x_op_ipu.py | 71 ++---- .../ipu/test_reshape_inplace_op_ipu.py | 77 ++---- .../unittests/ipu/test_reshape_op_ipu.py | 75 ++---- .../tests/unittests/ipu/test_scale_op_ipu.py | 127 ++-------- .../ipu/test_scaled_optimizer_state_ipu.py | 131 ++++++++++ .../unittests/ipu/test_set_batch_size_ipu.py | 89 ++----- .../tests/unittests/ipu/test_slice_op_ipu.py | 134 +++-------- 19 files changed, 845 insertions(+), 1154 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_not_equal_op_ipu.py create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py diff --git a/python/paddle/fluid/tests/unittests/ipu/test_mean_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_mean_op_ipu.py index b9dd7358b7..c0d7dd1fd1 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_mean_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_mean_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[1, 3, 10, 10]) self.feed_fp32 = {"in_0": data.astype(np.float32)} @@ -46,59 +42,22 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = paddle.fluid.layers.mean(x) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = 
self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] - - def test_base(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = paddle.fluid.layers.mean(x) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_inference_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_inference_ipu.py index a70550c1df..9bdf233556 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_inference_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_inference_ipu.py @@ -18,7 +18,7 @@ import numpy as np import paddle import paddle.static import paddle.nn.functional as F -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionModeFull +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -28,10 +28,7 @@ class TestBase(IPUOpTest): self.set_atol() self.set_data_feed() self.set_feed_attr() - - @property - def fp16_enabled(self): - return True + self.set_attrs() def set_atol(self): self.atol = 1e-6 @@ -42,7 +39,6 @@ class TestBase(IPUOpTest): def set_data_feed(self): data = np.random.uniform(size=[1, 10, 27, 27]) self.feed_fp32 = {"in_0": data.astype(np.float32)} - self.feed_fp16 = {"in_0": data.astype(np.float16)} def set_feed_attr(self): self.feed_shape = [x.shape for x in self.feed_fp32.values()] @@ -54,86 +50,126 @@ class TestBase(IPUOpTest): for var_name in to_fp16_var_names: assert (block.var(var_name).dtype, paddle.float16) - def _test_base(self, exec_mode): - generator = paddle.fluid.unique_name.UniqueNameGenerator() - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.fluid.unique_name.guard(generator): - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - # using fp32 - x = paddle.static.nn.conv2d( - input=x, num_filters=3, filter_size=3) - x = paddle.static.nn.batch_norm(x, act='relu') - x = F.max_pool2d(x, kernel_size=2, stride=2) - - # using fp16 - with paddle.static.amp.fp16_guard(): - x = paddle.static.nn.conv2d( - input=x, num_filters=6, filter_size=3) - x = paddle.static.nn.batch_norm(x, act='relu') - x = F.max_pool2d(x, kernel_size=2, stride=2) - - # using fp32 - x = paddle.static.nn.fc(x, size=10) - loss = paddle.mean(x) - fetch_list = [loss.name] - - if exec_mode == ExecutionModeFull.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - # cast model to fp16 - if exec_mode == ExecutionModeFull.IPU_MIXED_PRECISION: - to_fp16_var_names = paddle.static.amp.cast_model_to_fp16( - main_prog, self.amp_list) - self.dtype_check(main_prog, 
to_fp16_var_names)
-
-                exe = paddle.static.Executor(place)
-                exe.run(startup_prog)
-
-                # cast parameters to fp16
-                if exec_mode == ExecutionModeFull.IPU_MIXED_PRECISION:
-                    paddle.static.amp.cast_parameters_to_fp16(
-                        paddle.CPUPlace(),
-                        main_prog,
-                        to_fp16_var_names=to_fp16_var_names)
-
-                if exec_mode != ExecutionModeFull.CPU_FP32:
-                    ipu_strategy = paddle.static.IpuStrategy()
-                    ipu_strategy.set_graph_config(is_training=False)
-                    if exec_mode == ExecutionModeFull.IPU_POPART_FP16:
-                        ipu_strategy.set_precision_config(enable_fp16=True)
-                    program = paddle.static.IpuCompiledProgram(
-                        main_prog, ipu_strategy=ipu_strategy).compile(
-                            self.feed_list, fetch_list)
-                else:
-                    program = main_prog
-
-                feed = self.feed_fp32
-                result = exe.run(program, feed=feed, fetch_list=fetch_list)
-                return result[0]
+    def set_attrs(self):
+        self.num_ipus = 1
+        self.enable_pipelining = False
+        self.enable_manual_shard = False
+        self.batches_per_step = 1
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+
+        # using fp32
+        x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
+        x = paddle.static.nn.batch_norm(x, act='relu')
+        x = F.max_pool2d(x, kernel_size=2, stride=2)
+
+        # using fp16
+        with paddle.static.amp.fp16_guard():
+            x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
+            x = paddle.static.nn.batch_norm(x, act='relu')
+            x = F.max_pool2d(x, kernel_size=2, stride=2)
+
+        # using fp32
+        x = paddle.static.nn.fc(x, size=10)
+        loss = paddle.mean(x)
+        self.fetch_list = [loss.name]
+
+    def run_model(self, exec_mode):
+        # cast model to fp16
+        if self.is_fp16_mode(exec_mode):
+            amp_list = paddle.static.amp.CustomOpLists()
+            amp_list.unsupported_list = {}
+            to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
+                self.main_prog, amp_list, use_fp16_guard=True)
+            self.dtype_check(self.main_prog, to_fp16_var_names)
+
+        if self.is_ipu_mode(exec_mode):
+            place = paddle.IPUPlace()
+        else:
+            place = paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
+        exe.run(self.startup_prog)
+
+        # cast parameters to fp16
+        if self.is_fp16_mode(exec_mode):
+            paddle.static.amp.cast_parameters_to_fp16(
+                paddle.CPUPlace(),
+                self.main_prog,
+                to_fp16_var_names=to_fp16_var_names)
+
+        if self.is_ipu_mode(exec_mode):
+            ipu_strategy = paddle.static.IpuStrategy()
+            ipu_strategy.set_graph_config(
+                is_training=False,
+                num_ipus=self.num_ipus,
+                enable_manual_shard=self.enable_manual_shard)
+            ipu_strategy.set_pipelining_config(
+                enable_pipelining=self.enable_pipelining,
+                batches_per_step=self.batches_per_step)
+            program = paddle.static.IpuCompiledProgram(
+                self.main_prog, ipu_strategy=ipu_strategy).compile(
+                    self.feed_list, self.fetch_list)
+        else:
+            program = self.main_prog
+
+        result = exe.run(program,
+                         feed=self.feed_fp32,
+                         fetch_list=self.fetch_list)
+        self.output_dict[exec_mode] = result[0]
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            self.build_model()
+            self.run_model(m)
+        self.check()
+
+
+class TestPipline(TestBase):
+    @IPUOpTest.static_graph
+    def build_model(self, exec_mode):
+        feed_shape = list(self.feed_shape[0])
+        if self.is_ipu_mode(exec_mode):
+            feed_shape[0] = 1
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=feed_shape, dtype='float32')
+        with paddle.static.ipu_shard_guard(index=0, stage=0):
+            # using fp32
+            x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
+            x = paddle.static.nn.batch_norm(x, act='relu')
+            x = F.max_pool2d(x, kernel_size=2, 
stride=2) + + with paddle.static.ipu_shard_guard(index=1, stage=1): + # using fp16 + with paddle.static.amp.fp16_guard(): + x = paddle.static.nn.conv2d( + input=x, num_filters=6, filter_size=3) + x = paddle.static.nn.batch_norm(x, act='relu') + x = F.max_pool2d(x, kernel_size=2, stride=2) + + with paddle.static.ipu_shard_guard(index=2, stage=2): + # using fp32 + x = paddle.static.nn.fc(x, size=10) + loss = paddle.mean(x) + self.fetch_list = [loss.name] + + def set_data_feed(self): + data = np.random.uniform(size=[3, 10, 27, 27]) + self.feed_fp32 = {"in_0": data.astype(np.float32)} + + def set_attrs(self): + self.num_ipus = 3 + self.enable_pipelining = True + self.enable_manual_shard = True + self.batches_per_step = 3 def test(self): - output_dict = {} - for mode in ExecutionModeFull: - if mode == ExecutionModeFull.IPU_POPART_FP16: - continue - if mode > ExecutionModeFull.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + for m in IPUOpTest.ExecutionMode: + self.build_model(m) + self.run_model(m) + # skip check results if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_training_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_training_ipu.py index 224c0bddc2..c4ac9cddd7 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_training_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_mixed_precision_training_ipu.py @@ -18,7 +18,7 @@ import numpy as np import paddle import paddle.static import paddle.nn.functional as F -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionModeFull +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -29,10 +29,7 @@ class TestBase(IPUOpTest): self.set_training() self.set_data_feed() self.set_feed_attr() - - @property - def fp16_enabled(self): - return True + self.set_attrs() def set_atol(self): self.atol = 2e-6 @@ -47,104 +44,149 @@ class TestBase(IPUOpTest): def set_data_feed(self): data = np.random.uniform(size=[1, 3, 28, 28]) self.feed_fp32 = {"in_0": data.astype(np.float32)} - self.feed_fp16 = {"in_0": data.astype(np.float16)} def set_feed_attr(self): self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_list = list(self.feed_fp32.keys()) + def set_attrs(self): + self.num_ipus = 1 + self.enable_pipelining = False + self.enable_manual_shard = False + self.batches_per_step = 1 + def dtype_check(self, program, to_fp16_var_names): block = program.global_block() assert len(to_fp16_var_names) > 0 for var_name in to_fp16_var_names: assert (block.var(var_name).dtype, paddle.float16) - def _test_base(self, exec_mode): - generator = paddle.fluid.unique_name.UniqueNameGenerator() - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.fluid.unique_name.guard(generator): - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - # using fp32 - x = paddle.static.nn.conv2d( - input=x, num_filters=3, filter_size=3) - x = paddle.static.nn.batch_norm(x, act='relu') - x = F.max_pool2d(x, kernel_size=2, stride=2) - - # using fp16 - with paddle.static.amp.fp16_guard(): - x = paddle.static.nn.conv2d( - input=x, 
num_filters=6, filter_size=3)
-                        x = paddle.static.nn.batch_norm(x, act='relu')
-                        x = F.max_pool2d(x, kernel_size=2, stride=2)
-
-                    # using fp32
-                    x = paddle.static.nn.fc(x, size=10)
-                    loss = paddle.mean(x)
-
-                    # optimizer
-                    optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
-                    optimizer.minimize(loss, startup_prog)
-                    fetch_list = [loss.name]
-
-                # cast model to fp16
-                if exec_mode == ExecutionModeFull.IPU_MIXED_PRECISION:
-                    to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
-                        main_prog, self.amp_list)
-                    self.dtype_check(main_prog, to_fp16_var_names)
-
-                if exec_mode == ExecutionModeFull.CPU_FP32:
-                    place = paddle.CPUPlace()
-                else:
-                    place = paddle.IPUPlace()
-                exe = paddle.static.Executor(place)
-                exe.run(startup_prog)
-
-                # cast parameters to fp16
-                if exec_mode == ExecutionModeFull.IPU_MIXED_PRECISION:
-                    paddle.static.amp.cast_parameters_to_fp16(
-                        paddle.CPUPlace(),
-                        main_prog,
-                        to_fp16_var_names=to_fp16_var_names)
-
-                if exec_mode != ExecutionModeFull.CPU_FP32:
-                    ipu_strategy = paddle.static.IpuStrategy()
-                    ipu_strategy.set_graph_config(is_training=self.is_training)
-                    if exec_mode == ExecutionModeFull.IPU_POPART_FP16:
-                        ipu_strategy.set_precision_config(enable_fp16=True)
-                    program = paddle.static.IpuCompiledProgram(
-                        main_prog, ipu_strategy=ipu_strategy).compile(
-                            self.feed_list, fetch_list)
-                else:
-                    program = main_prog
-
-                feed = self.feed_fp32
-                result = []
-                for i in range(self.epoch):
-                    out = exe.run(program, feed=feed, fetch_list=fetch_list)
-                    result.append(out)
-                return np.array(result)
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionModeFull:
-            if mode == ExecutionModeFull.IPU_POPART_FP16:
-                continue
-            if mode > ExecutionModeFull.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+
+        # using fp32
+        x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
+        x = paddle.static.nn.batch_norm(x, act='relu')
+        x = F.max_pool2d(x, kernel_size=2, stride=2)
+
+        # using fp16
+        with paddle.static.amp.fp16_guard():
+            x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
+            x = paddle.static.nn.batch_norm(x, act='relu')
+            x = F.max_pool2d(x, kernel_size=2, stride=2)
+
+        # using fp32
+        x = paddle.static.nn.fc(x, size=10)
+        loss = paddle.mean(x)
+
+        # optimizer
+        optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
+        optimizer.minimize(loss, self.startup_prog)
+        self.fetch_list = [loss.name]
+
+    def run_model(self, exec_mode):
+        # cast model to fp16
+        if self.is_fp16_mode(exec_mode):
+            amp_list = paddle.static.amp.CustomOpLists()
+            amp_list.unsupported_list = {}
+            to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
+                self.main_prog, amp_list)
+            self.dtype_check(self.main_prog, to_fp16_var_names)
+
+        if self.is_ipu_mode(exec_mode):
+            place = paddle.IPUPlace()
+        else:
+            place = paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
+        exe.run(self.startup_prog)
+
+        # cast parameters to fp16
+        if self.is_fp16_mode(exec_mode):
+            paddle.static.amp.cast_parameters_to_fp16(
+                paddle.CPUPlace(),
+                self.main_prog,
+                to_fp16_var_names=to_fp16_var_names)
+
+        if self.is_ipu_mode(exec_mode):
+            ipu_strategy = paddle.static.IpuStrategy()
+            ipu_strategy.set_graph_config(
+                is_training=self.is_training,
+                num_ipus=self.num_ipus,
+                enable_manual_shard=self.enable_manual_shard)
+            ipu_strategy.set_pipelining_config(
+                
enable_pipelining=self.enable_pipelining, + batches_per_step=self.batches_per_step) + program = paddle.static.IpuCompiledProgram( + self.main_prog, ipu_strategy=ipu_strategy).compile( + self.feed_list, self.fetch_list) + else: + program = self.main_prog + + result = [] + for _ in range(self.epoch): + out = exe.run(program, + feed=self.feed_fp32, + fetch_list=self.fetch_list) + result.append(out) + self.output_dict[exec_mode] = result + + def test(self): + for m in IPUOpTest.ExecutionMode: + self.build_model() + self.run_model(m) + self.check() + + +class TestPipline(TestBase): + @IPUOpTest.static_graph + def build_model(self, exec_mode): + feed_shape = list(self.feed_shape[0]) + if self.is_ipu_mode(exec_mode): + feed_shape[0] = 1 + x = paddle.static.data( + name=self.feed_list[0], shape=feed_shape, dtype='float32') + + with paddle.static.ipu_shard_guard(index=0, stage=0): + # using fp32 + x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3) + x = paddle.static.nn.batch_norm(x, act='relu') + x = F.max_pool2d(x, kernel_size=2, stride=2) + + with paddle.static.ipu_shard_guard(index=1, stage=1): + # using fp16 + with paddle.static.amp.fp16_guard(): + x = paddle.static.nn.conv2d( + input=x, num_filters=6, filter_size=3) + x = paddle.static.nn.batch_norm(x, act='relu') + x = F.max_pool2d(x, kernel_size=2, stride=2) + + with paddle.static.ipu_shard_guard(index=2, stage=2): + # using fp32 + x = paddle.static.nn.fc(x, size=10) + loss = paddle.mean(x) + + # optimizer + optimizer = paddle.optimizer.Adam(learning_rate=1e-2) + optimizer.minimize(loss, self.startup_prog) + self.fetch_list = [loss.name] + + def set_data_feed(self): + data = np.random.uniform(size=[5, 10, 27, 27]) + self.feed_fp32 = {"in_0": data.astype(np.float32)} + + def set_attrs(self): + self.num_ipus = 3 + self.enable_pipelining = True + self.enable_manual_shard = True + self.batches_per_step = 5 + + def test(self): + for m in IPUOpTest.ExecutionMode: + self.build_model(m) + self.run_model(m) + # skip check results if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_mul_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_mul_op_ipu.py index 7a9135626d..583a8941ac 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_mul_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_mul_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): x = np.random.uniform(size=[2, 5]) y = np.random.uniform(size=[5, 3]) @@ -51,63 +47,24 @@ class TestBase(IPUOpTest): "y_num_col_dims": 1, } - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - y = paddle.static.data( - name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - - out = paddle.fluid.layers.mul(x, y, **self.attrs) - - fetch_list = [out.name] - - 
if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] - - def test_base(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32') + out = paddle.fluid.layers.mul(x, y, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_not_equal_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_not_equal_op_ipu.py new file mode 100644 index 0000000000..a4365c021f --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ipu/test_not_equal_op_ipu.py @@ -0,0 +1,130 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import paddle +import paddle.static +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest + + +@unittest.skipIf(not paddle.is_compiled_with_ipu(), + "core is not compiled with IPU") +class TestBase(IPUOpTest): + def setUp(self): + self.set_atol() + self.set_training() + self.set_data_feed() + self.set_feed_attr() + self.set_op_attrs() + + def set_data_feed(self): + x = np.ones([1, 10]) + y = np.zeros([1, 10]) + self.feed_fp32 = { + "x": x.astype(np.float32), + "y": y.astype(np.float32), + } + self.feed_fp16 = { + "x": x.astype(np.float16), + "y": y.astype(np.float16), + } + + def set_feed_attr(self): + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + + def set_op_attrs(self): + self.attrs = {} + + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + y = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32') + out = paddle.fluid.layers.not_equal(x, y, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() + + +class TestCase1(TestBase): + def set_data_feed(self): + x = np.ones([1, 10]) + y = np.ones([1, 10]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)} + + +class TestCase2(TestBase): + def set_data_feed(self): + x = np.ones([1, 10]) + y = np.arange(0, 10).reshape([1, 10]) + self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)} + self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)} + + +@unittest.skipIf(not paddle.is_compiled_with_ipu(), + "core is not compiled with IPU") +class TestScalar(IPUOpTest): + def setUp(self): + self.set_atol() + self.set_training() + self.set_data_feed() + self.set_feed_attr() + self.set_op_attrs() + + def set_data_feed(self): + x = np.ones([1, 10]) + y = 0.5 + self.feed_fp32 = {"x": x.astype(np.float32), } + self.feed_fp16 = {"x": x.astype(np.float16), } + + def set_feed_attr(self): + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + + def set_op_attrs(self): + self.attrs = {} + + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = (x != 0.5) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_one_hot_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_one_hot_op_ipu.py index 33a5dc888c..938654bfaf 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_one_hot_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_one_hot_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,74 +30,34 @@ 
class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data1 = np.array([[1], [1], [3], [0]]) - - self.feed = {'x': data1.astype(np.int32)} + self.feed_fp32 = {'x': data1.astype(np.int32)} + self.feed_fp16 = {'x': data1.astype(np.int32)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) def set_op_attrs(self): self.attrs = {"depth": 4, "allow_out_of_range": False} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int32') - - out = paddle.fluid.layers.one_hot(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - - return result[0] - - def test_base(self): - output_dict = {} - for mode in ExecutionMode: - if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled): - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32') + out = paddle.fluid.layers.one_hot(x, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() @unittest.skip('does not support allow_out_of_range=True') diff --git a/python/paddle/fluid/tests/unittests/ipu/test_one_hot_v2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_one_hot_v2_op_ipu.py index 79fc9b04e1..ec25f37886 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_one_hot_v2_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_one_hot_v2_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,74 +30,34 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data1 = np.array([[1], [1], [3], [0]]) - - self.feed = {'x': data1.astype(np.int32)} + self.feed_fp32 = {'x': data1.astype(np.int32)} + self.feed_fp16 = {'x': data1.astype(np.int32)} def 
set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) def set_op_attrs(self): self.attrs = {"depth": 4, "allow_out_of_range": False} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='int32') - - out = paddle.fluid.input.one_hot(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - - return result[0] - - def test_base(self): - output_dict = {} - for mode in ExecutionMode: - if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled): - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32') + out = paddle.fluid.input.one_hot(x, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() @unittest.skip('does not support allow_out_of_range=True') diff --git a/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py index 43f54b52b5..060a69e831 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function - import numpy as np import unittest import paddle diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py index 4288b82832..e5df11eb4e 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[1, 3, 10, 10]) self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -56,59 +52,22 @@ class TestBase(IPUOpTest): "data_format": 'NCHW', } - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = paddle.fluid.layers.pool2d(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] - - def test_base(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = paddle.fluid.layers.pool2d(x, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): @@ -180,5 +139,21 @@ class TestCase6(TestBase): self.attrs['exclusive'] = False +class TestAdaptive(TestBase): + def set_op_attrs(self): + self.attrs = { + "pool_size": 1, + "pool_type": 'avg', + "require_index": False + } + + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs) + self.fetch_list = [out.name] + + if __name__ == "__main__": unittest.main() diff --git 
a/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py index 911a163b8a..41b2b8406d 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[1, 3, 10, 10]) self.feed_fp32 = {'in_0': data.astype(np.float32)} @@ -56,59 +52,22 @@ class TestBase(IPUOpTest): "data_format": 'NCHW', } - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = paddle.fluid.layers.pool2d(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] - - def test_base(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = paddle.fluid.layers.pool2d(x, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): @@ -179,5 +138,21 @@ class TestCase6(TestBase): self.attrs['exclusive'] = False +class TestAdaptive(TestBase): + def set_op_attrs(self): + self.attrs = { + "pool_size": 1, + "pool_type": 'max', + "require_index": False + } + + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs) + self.fetch_list = [out.name] + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py index 
b3562d722c..5ff1223961 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pow_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -30,10 +30,6 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() - @property - def fp16_enabled(self): - return True - def set_data_feed(self): data = np.random.uniform(size=[1, 3, 2, 2]) self.feed_fp32 = {"x": data.astype(np.float32)} @@ -47,59 +43,22 @@ class TestBase(IPUOpTest): def set_op_attrs(self): self.attrs = {"factor": 2.0} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = paddle.fluid.layers.pow(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] - - def test_base(self): - output_dict = {} - for mode in ExecutionMode: - if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: - break - output_dict[mode] = self._test_base(mode).flatten() - - self.check(output_dict) + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = paddle.fluid.layers.pow(x, **self.attrs) + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) + + def test(self): + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) + self.check() class TestCase1(TestBase): @@ -119,54 +78,14 @@ class TestCase1(TestBase): def set_op_attrs(self): self.attrs = {} - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - factor = paddle.static.data( - name=self.feed_list[1], - shape=self.feed_shape[1], - dtype='float32') - - out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = 
paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + factor = paddle.static.data( + name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32') + out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs) + self.fetch_list = [out.name] if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py index c9454e5945..3189e060d5 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py @@ -30,82 +30,48 @@ class TestBase(IPUOpTest): self.set_feed_attr() self.set_op_attrs() + @property + def fp16_enabled(self): + return False + def set_data_feed(self): - self.feed = { - "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'), - } + data = np.random.uniform(size=[1, 3, 3, 3]).astype('float32') + self.feed_fp32 = {"x": data.astype(np.float32)} + self.feed_fp16 = {"x": data.astype(np.float16)} def set_feed_attr(self): - self.feed_shape = [x.shape for x in self.feed.values()] - self.feed_list = list(self.feed.keys()) - self.feed_dtype = [x.dtype for x in self.feed.values()] + self.feed_shape = [x.shape for x in self.feed_fp32.values()] + self.feed_list = list(self.feed_fp32.keys()) + self.feed_dtype = [x.dtype for x in self.feed_fp32.values()] def set_op_attrs(self): self.attrs = {} - def _test_base(self, run_ipu=True): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype=self.feed_dtype[0]) - out = paddle.fluid.layers.conv2d( - x, num_filters=3, filter_size=3) - out = paddle.fluid.layers.Print(out, **self.attrs) - - if self.is_training: - loss = paddle.mean(out) - adam = paddle.optimizer.Adam(learning_rate=1e-2) - adam.minimize(loss) - fetch_list = [loss.name] - else: - fetch_list = [out.name] - - if run_ipu: - place = paddle.IPUPlace() - else: - place = paddle.CPUPlace() - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if run_ipu: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - if self.is_training: - result = [] - for _ in range(self.epoch): - loss_res = exe.run(program, - feed=self.feed, - fetch_list=fetch_list) - 
result.append(loss_res[0]) - return np.array(result) - else: - result = exe.run(program, feed=self.feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0]) + out = paddle.fluid.layers.conv2d(x, num_filters=3, filter_size=3) + out = paddle.fluid.layers.Print(out, **self.attrs) + + if self.is_training: + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=1e-2) + adam.minimize(loss) + self.fetch_list = [loss.name] + else: + self.fetch_list = [out.name] + + def run_model(self, exec_mode): + self.run_op_test(exec_mode) def test(self): - res0 = self._test_base(False) - res1 = self._test_base(True) - - self.assertTrue( - np.allclose( - res0.flatten(), res1.flatten(), atol=self.atol)) - - self.assertTrue(res0.shape == res1.shape) + for m in IPUOpTest.ExecutionMode: + if not self.skip_mode(m): + self.build_model() + self.run_model(m) class TestCase1(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py index 929ee51b65..93f96e08fd 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py @@ -17,7 +17,7 @@ import unittest import numpy as np import paddle import paddle.static -from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode +from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest @unittest.skipIf(not paddle.is_compiled_with_ipu(), @@ -28,10 +28,6 @@ class TestMean(IPUOpTest): self.set_training() self.set_test_op() - @property - def fp16_enabled(self): - return True - def set_test_op(self): self.op = paddle.fluid.layers.reduce_mean @@ -40,59 +36,22 @@ class TestMean(IPUOpTest): self.feed_list = list(self.feed_fp32.keys()) self.feed_dtype = [x.dtype for x in self.feed_fp32.values()] - def _test_base(self, exec_mode): - scope = paddle.static.Scope() - main_prog = paddle.static.Program() - startup_prog = paddle.static.Program() - main_prog.random_seed = self.SEED - startup_prog.random_seed = self.SEED - - with paddle.static.scope_guard(scope): - with paddle.static.program_guard(main_prog, startup_prog): - x = paddle.static.data( - name=self.feed_list[0], - shape=self.feed_shape[0], - dtype='float32') - - out = self.op(x, **self.attrs) - - fetch_list = [out.name] - - if exec_mode == ExecutionMode.CPU_FP32: - place = paddle.CPUPlace() - else: - place = paddle.IPUPlace() - - exe = paddle.static.Executor(place) - exe.run(startup_prog) - - if exec_mode != ExecutionMode.CPU_FP32: - feed_list = self.feed_list - ipu_strategy = paddle.static.IpuStrategy() - ipu_strategy.set_graph_config(is_training=self.is_training) - if exec_mode == ExecutionMode.IPU_POPART_FP16: - ipu_strategy.set_precision_config(enable_fp16=True) - program = paddle.static.IpuCompiledProgram( - main_prog, - ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) - else: - program = main_prog - - feed = self.feed_fp32 - if exec_mode > ExecutionMode.IPU_FP32: - feed = self.feed_fp16 - - result = exe.run(program, feed=feed, fetch_list=fetch_list) - return result[0] + @IPUOpTest.static_graph + def build_model(self): + x = paddle.static.data( + name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32') + out = self.op(x, **self.attrs) + self.fetch_list = [out.name] - def run_test_base(self): - output_dict = {} - for mode in ExecutionMode: - if 
mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
 
-        self.check(output_dict)
+    def run_test_base(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
     def set_data_feed0(self):
         data = np.random.uniform(size=[2, 4])
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_reshape_inplace_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_reshape_inplace_op_ipu.py
index 9ddf5c7537..35be4d9882 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_reshape_inplace_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_reshape_inplace_op_ipu.py
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 10, 10])
         self.feed_fp32 = {"x": data.astype(np.float32)}
@@ -50,60 +46,23 @@ class TestBase(IPUOpTest):
             "inplace": True,
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                add = paddle.fluid.layers.elementwise_add(x, x)
-                out = paddle.fluid.layers.reshape(add, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
-
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        add = paddle.fluid.layers.elementwise_add(x, x)
+        out = paddle.fluid.layers.reshape(add, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check(check_shape=True)
 
 
 class TestCase1(TestBase):
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_reshape_op_ipu.py 
b/python/paddle/fluid/tests/unittests/ipu/test_reshape_op_ipu.py
index 1197719317..427e975402 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_reshape_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_reshape_op_ipu.py
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[2, 4, 6])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
@@ -48,59 +44,22 @@ class TestBase(IPUOpTest):
         self.attrs['shape'] = [6, 8]
         self.attrs['inplace'] = False
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.reshape(x=x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
-
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.reshape(x=x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check(check_shape=True)
 
 
 class TestCase1(TestBase):
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_scale_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_scale_op_ipu.py
index 49714eba8d..f28bcba4cf 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_scale_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_scale_op_ipu.py
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -51,59 +51,22 @@ class TestBase(IPUOpTest):
             "bias_after_scale": True,
         }
 
-    
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.scale(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.scale(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 class TestCase1(TestBase):
@@ -155,54 +118,14 @@ class TestCase5(TestBase):
             "bias_after_scale": True,
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                y = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.scale(x, scale=y, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        y = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
+        out = paddle.fluid.layers.scale(x, scale=y, **self.attrs)
+        self.fetch_list = [out.name]
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py
new file mode 100644
index 0000000000..113b316af4
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_scaled_optimizer_state_ipu.py
@@ -0,0 +1,131 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import unittest
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_data_feed()
+        self.set_feed_attr()
+        self.set_attrs()
+
+    def set_training(self):
+        self.is_training = True
+        self.epoch = 100
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[1, 3, 10, 10])
+        self.feed_fp32 = {"image": data.astype(np.float32)}
+        self.feed_fp16 = {"image": data.astype(np.float16)}
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
+
+    def set_attrs(self):
+        self.attrs = {
+            "optimizer": 'lamb',
+            "weight_decay": 0.0,
+            "scaled_optimizer_state": True
+        }
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        image = paddle.static.data(
+            name='image', shape=[1, 3, 10, 10], dtype='float32')
+        conv1 = paddle.static.nn.conv2d(
+            image, num_filters=3, filter_size=3, bias_attr=False)
+        loss = paddle.mean(conv1)
+
+        weight_decay = self.attrs['weight_decay']
+        opt = paddle.optimizer.Adam(
+            learning_rate=1e-1, weight_decay=weight_decay)
+        if self.attrs['optimizer'] == 'lamb':
+            opt = paddle.optimizer.Lamb(
+                learning_rate=1e-1, lamb_weight_decay=weight_decay)
+        opt.minimize(loss)
+        self.feed_list = [image.name]
+        self.fetch_list = [loss.name]
+
+    def run_model(self, exec_mode):
+        ipu_strategy = paddle.static.IpuStrategy()
+        ipu_strategy.set_graph_config(is_training=self.is_training)
+        if self.is_ipu_mode(exec_mode):
+            if "use_no_bias_optimizer" in self.attrs.keys():
+                ipu_strategy.set_options({
+                    "use_no_bias_optimizer": self.attrs["use_no_bias_optimizer"]
+                })
+            if "scaled_optimizer_state" in self.attrs.keys():
+                ipu_strategy.set_options({
+                    "scaled_optimizer_state":
+                    self.attrs["scaled_optimizer_state"]
+                })
+        self.run_op_test(exec_mode, ipu_strategy=ipu_strategy)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+class TestScaledAdam(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "optimizer": 'adam',
+            "weight_decay": 0.0,
+            "scaled_optimizer_state": True
+        }
+
+    def set_atol(self):
+        super().set_atol()
+        self.atol = 1e-5
+        self.rtol = 1e-5
+
+
+@unittest.skip('CPU does not support AdamNoBias')
+class TestScaledAdamNoBias(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "optimizer": 'adam',
+            "weight_decay": 0.0,
+            "use_no_bias_optimizer": True,
+            "scaled_optimizer_state": True
+        }
+
+
+@unittest.skip('CPU does not support LambNoBias')
+class TestScaledLambNoBias(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "optimizer": 'lamb',
+            "weight_decay": 0.0,
+            "use_no_bias_optimizer": True,
+            "scaled_optimizer_state": True
+        }
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_set_batch_size_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_set_batch_size_ipu.py
index 6702ae4344..5c61012cac 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_set_batch_size_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_set_batch_size_ipu.py
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_atol(self):
         self.atol = 3e-6
         self.rtol = 1e-5
@@ -52,67 +48,32 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {}
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                conv1 = paddle.static.nn.conv2d(
-                    x, num_filters=3, filter_size=3, bias_attr=False)
-                conv2 = paddle.static.nn.conv2d(
-                    conv1, num_filters=3, filter_size=3, bias_attr=False)
-                conv3 = paddle.static.nn.conv2d(
-                    conv2, num_filters=3, filter_size=3, bias_attr=False)
-                conv4 = paddle.static.nn.conv2d(
-                    conv3, num_filters=3, filter_size=3, bias_attr=False)
-
-                fetch_list = [conv4.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(
-                    is_training=self.is_training, micro_batch_size=2)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        conv1 = paddle.static.nn.conv2d(
+            x, num_filters=3, filter_size=3, bias_attr=False)
+        conv2 = paddle.static.nn.conv2d(
+            conv1, num_filters=3, filter_size=3, bias_attr=False)
+        conv3 = paddle.static.nn.conv2d(
+            conv2, num_filters=3, filter_size=3, bias_attr=False)
+        conv4 = paddle.static.nn.conv2d(
+            conv3, num_filters=3, filter_size=3, bias_attr=False)
+        self.fetch_list = [conv4.name]
+
+    def run_model(self, exec_mode):
+        ipu_strategy = paddle.static.IpuStrategy()
+        ipu_strategy.set_graph_config(
+            is_training=self.is_training, micro_batch_size=2)
+        self.run_op_test(exec_mode, ipu_strategy)
 
     def test(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-
-        self.check(output_dict)
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py
index 8881f018de..ac8ef3e9d6 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_slice_op_ipu.py
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[4, 5, 6])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
@@ -51,59 +47,22 @@ class TestBase(IPUOpTest):
             "ends": [3, 2, 4],
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.slice(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
-
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.slice(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 class TestCase1(TestBase):
@@ -135,54 +94,17 @@ class TestCase2(TestBase):
     def set_op_attrs(self):
         self.attrs = {"axes": [0, 1, 2]}
 
-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with fluid.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                starts = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='int32')
-                ends = paddle.static.data(
-                    name=self.feed_list[2],
-                    shape=self.feed_shape[2],
-                    dtype='int32')
-                out = paddle.fluid.layers.slice(
-                    x, starts=starts, ends=ends, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
-                place = paddle.CPUPlace()
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if run_ipu:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        pass
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        starts = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
+        ends = paddle.static.data(
+            name=self.feed_list[2], shape=self.feed_shape[2], dtype='int32')
+        out = paddle.fluid.layers.slice(
+            x, starts=starts, ends=ends, **self.attrs)
+        self.fetch_list = [out.name]
 
 
 if __name__ == "__main__":
-- 
GitLab
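
For reference, every file touched by this patch is converted to the same harness shape. The standalone sketch below shows that shape in one place. It is illustrative only and not part of the patch: it assumes the IPUOpTest helpers (static_graph, skip_mode, run_op_test, check, set_atol, set_training) behave as they are used in the hunks above, and the relu operator plus the [2, 3] input shape are hypothetical stand-ins.

# Minimal sketch of the refactored IPU op-test pattern used throughout this
# patch. The relu op and input shape are hypothetical; the IPUOpTest helper
# behavior is assumed from its usage in the hunks above.
import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestSketch(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        # Keep an fp32 and an fp16 copy of the same data so every execution
        # mode (CPU FP32, IPU FP32, IPU FP16) feeds identical values.
        data = np.random.uniform(size=[2, 3])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}
        self.feed_list = list(self.feed_fp32.keys())

    # static_graph gives the method a fresh Program and scope, so the model
    # can be rebuilt once per execution mode.
    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(name='in_0', shape=[2, 3], dtype='float32')
        out = paddle.fluid.layers.relu(x)
        self.fetch_list = [out.name]

    def test(self):
        # run_op_test compiles and runs the graph for the given mode and
        # records its outputs; check() then compares every mode's result
        # against the CPU FP32 reference within atol/rtol.
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_op_test(m)
        self.check()


if __name__ == "__main__":
    unittest.main()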