From a05e7ef6fc1eaba2d709db3f2030f91d6fe1bf02 Mon Sep 17 00:00:00 2001
From: Allen Guo
Date: Tue, 12 Jul 2022 11:18:28 +0800
Subject: [PATCH] [IPU] add more UTs 2/N (#44208)

* add authors

Co-authored-by: Allen Guo
Co-authored-by: Zhixin Yao
Co-authored-by: Zhaorui Chen

* squash py changes 2/N

Co-authored-by: Zhixin Yao
Co-authored-by: Zhaorui Chen
---
 .../unittests/ipu/test_kldiv_loss_op_ipu.py   |  95 +++++++++++
 .../tests/unittests/ipu/test_matmul_op_ipu.py |  34 +++-
 .../unittests/ipu/test_matmul_v2_op_ipu.py    |  30 ++++
 .../unittests/ipu/test_meshgrid_op_ipu.py     | 133 +++++++++++++++
 .../unittests/ipu/test_model_pipeline_ipu.py  | 100 +++++-------
 .../tests/unittests/ipu/test_p_norm_op_ipu.py |  75 +++++++++
 .../tests/unittests/ipu/test_pad_op_ipu.py    | 152 ++++++++++++++++++
 .../tests/unittests/ipu/test_prelu_op_ipu.py  |   1 -
 .../unittests/ipu/test_reduce_x_op_ipu.py     |  54 +++++++
 .../test_softmax_with_cross_entropy_op_ipu.py |  99 ++++++++++++
 .../unittests/ipu/test_warpctc_op_ipu.py      | 122 ++++++++++++++
 11 files changed, 833 insertions(+), 62 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_kldiv_loss_op_ipu.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_meshgrid_op_ipu.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_p_norm_op_ipu.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_warpctc_op_ipu.py

diff --git a/python/paddle/fluid/tests/unittests/ipu/test_kldiv_loss_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_kldiv_loss_op_ipu.py
new file mode 100644
index 00000000000..d6d48c65063
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_kldiv_loss_op_ipu.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+import paddle.nn.functional as F
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_data_feed()
+        self.set_feed_attr()
+        self.set_op_attrs()
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 4, 2, 2])
+        target = np.random.uniform(size=[3, 4, 2, 2])
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "target": target.astype(np.float32)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "target": target.astype(np.float16)
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'reduction': 'mean',
+        }
+
+    @IPUOpTest.static_graph
+    def build_model(self, on_ipu):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype="float32")
+        target = paddle.static.data(name=self.feed_list[1],
+                                    shape=self.feed_shape[1],
+                                    dtype='float32')
+        out = paddle.fluid.layers.kldiv_loss(x, target, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model(self.is_ipu_mode(m))
+                self.run_model(m)
+        self.check()
+
+
+class TestCase1(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'reduction': 'sum',
+        }
+
+
+class TestCase2(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'reduction': 'none',
+        }
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py
index 222bb202097..fb8cf86b71c 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py
@@ -157,8 +157,8 @@ class TestCase6_2(TestCase6):
 class TestCase7(TestBase):
 
     def set_data_feed(self):
-        x = np.random.uniform(size=[1, 12, 128, 64])
-        y = np.random.uniform(size=[1, 12, 128, 64])
+        x = np.random.uniform(size=[1, 3, 4, 5])
+        y = np.random.uniform(size=[1, 3, 4, 5])
 
         self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
         self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
@@ -205,5 +205,35 @@ class TestCase9(TestBase):
         self.feed_fp16 = {"x": x.astype(np.float16), "y": x.astype(np.float16)}
 
 
+class TestCase10(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            "transpose_y": True,
+        }
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[4, 2, 3])
+        y = np.random.uniform(size=[2, 3])
+
+        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
+
+
+class TestCase11(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            "transpose_x": True,
+        }
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[4, 3, 2])
+        y = np.random.uniform(size=[3, 2])
+
+        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_matmul_v2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_matmul_v2_op_ipu.py
index 4777c42da13..6e84066a4a1 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_matmul_v2_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_matmul_v2_op_ipu.py
@@ -150,5 +150,35 @@ class TestCase8(TestBase):
         }
 
 
+class TestCase9(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            "transpose_y": True,
+        }
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[4, 2, 3])
+        y = np.random.uniform(size=[2, 3])
+
+        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
+
+
+class TestCase10(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            "transpose_x": True,
+        }
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[4, 3, 2])
+        y = np.random.uniform(size=[3, 2])
+
+        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_meshgrid_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_meshgrid_op_ipu.py
new file mode 100644
index 00000000000..4efd4c5714b
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_meshgrid_op_ipu.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_feed_attr()
+        self.set_op_attrs()
+
+    def set_atol(self):
+        self.atol = 1e-6
+        self.rtol = 1e-6
+        self.atol_fp16 = 1e-3
+        self.rtol_fp16 = 1e-3
+
+    def set_feed(self):
+        data1 = np.random.uniform(size=[100])
+        data2 = np.random.uniform(size=[200])
+        self.feed_fp32 = {
+            'x': data1.astype(np.float32),
+            'y': data2.astype(np.float32)
+        }
+        self.feed_fp16 = {
+            'x': data1.astype(np.float16),
+            'y': data2.astype(np.float16)
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
+
+    def set_op_attrs(self):
+        self.attrs = {}
+        self.attrs['axis'] = [0, 1]
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype=self.feed_dtype[0])
+        y = paddle.static.data(name=self.feed_list[1],
+                               shape=self.feed_shape[1],
+                               dtype=self.feed_dtype[1])
+        r1, r2 = paddle.meshgrid(x, y)
+        self.fetch_list = [r1.name, r2.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        for k, v in self.output_dict.items():
+            self.output_dict[k] = np.concatenate([vv.flatten() for vv in v])
+        self.check()
+
+
+class TestCase1(TestBase):
+
+    def set_feed(self):
+        data1 = np.random.uniform(size=[10])
+        data2 = np.random.uniform(size=[20])
+        data3 = np.random.uniform(size=[30])
+        self.feed_fp32 = {
+            'x': data1.astype(np.float32),
+            'y': data2.astype(np.float32),
+            'z': data3.astype(np.float32)
+        }
+        self.feed_fp16 = {
+            'x': data1.astype(np.float16),
+            'y': data2.astype(np.float16),
+            'z': data3.astype(np.float16)
+        }
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype=self.feed_dtype[0])
+        y = paddle.static.data(name=self.feed_list[1],
+                               shape=self.feed_shape[1],
+                               dtype=self.feed_dtype[1])
+        z = paddle.static.data(name=self.feed_list[2],
+                               shape=self.feed_shape[2],
+                               dtype=self.feed_dtype[2])
+        r1, r2, r3 = paddle.meshgrid(x, y, z)
+        self.fetch_list = [r1.name, r2.name, r3.name]
+
+
+class TestCase2(TestBase):
+
+    def set_feed(self):
+        data1 = np.random.uniform(size=[100])
+        data2 = np.random.uniform(size=[200])
+        self.feed_fp32 = {
+            'x': data1.astype(np.int32),
+            'y': data2.astype(np.int32)
+        }
+        self.feed_fp16 = {
+            'x': data1.astype(np.int32),
+            'y': data2.astype(np.int32)
+        }
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_model_pipeline_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_model_pipeline_ipu.py
index 27538610a42..9f7ebc52834 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_model_pipeline_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_model_pipeline_ipu.py
@@ -12,79 +12,61 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import print_function
+import unittest
 
 import numpy as np
-import unittest
 import paddle
 import paddle.static
-
-paddle.enable_static()
-SEED = 2021
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
                  "core is not compiled with IPU")
-class TestCastNet(unittest.TestCase):
-
-    def _test(self, run_ipu=True):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
-        np.random.seed(SEED)
-
-        np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)
+class TestBase(IPUOpTest):
 
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                image = paddle.static.data(name='image',
-                                           shape=[1, 3, 10, 10],
-                                           dtype='float32')
-                with paddle.static.ipu_shard_guard(index=0):
-                    conv1 = paddle.static.nn.conv2d(image,
-                                                    num_filters=3,
-                                                    filter_size=3,
-                                                    bias_attr=False)
-                with paddle.static.ipu_shard_guard(index=1):
-                    conv2 = paddle.static.nn.conv2d(conv1,
-                                                    num_filters=3,
-                                                    filter_size=3,
-                                                    bias_attr=False)
-                loss = paddle.mean(conv2)
+    def setUp(self):
+        self.set_training()
+        self.set_data_feed()
+        self.set_feed_attr()
 
-        if run_ipu:
-            place = paddle.IPUPlace()
-        else:
-            place = paddle.CPUPlace()
-        executor = paddle.static.Executor(place)
-        executor.run(startup_prog)
+    def set_data_feed(self):
+        data = np.random.uniform(size=[2, 3, 10, 10])
+        self.feed_fp32 = {"in_0": data.astype(np.float32)}
 
-        if run_ipu:
-            feed_list = [image.name]
-            fetch_list = [loss.name]
-            ipu_strategy = paddle.static.IpuStrategy()
-            ipu_strategy.set_graph_config(num_ipus=2,
-                                          is_training=False,
-                                          enable_manual_shard=True)
-            ipu_strategy.set_pipelining_config(enable_pipelining=False)
-            program = paddle.static.IpuCompiledProgram(
-                main_prog,
-                ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-        else:
-            program = main_prog
+    def set_feed_attr(self):
+        self.feed_shape = [(1, 3, 10, 10)]
+        self.feed_list = list(self.feed_fp32.keys())
 
-        loss_res = executor.run(program,
-                                feed={"image": np_image},
-                                fetch_list=[loss])
-        return loss_res
+    @IPUOpTest.static_graph
+    def build_model(self):
+        image = paddle.static.data(name=self.feed_list[0],
+                                   shape=self.feed_shape[0],
+                                   dtype='float32')
+        with paddle.static.ipu_shard_guard(index=0):
+            conv1 = paddle.static.nn.conv2d(image,
+                                            num_filters=3,
+                                            filter_size=3,
+                                            bias_attr=False)
+        with paddle.static.ipu_shard_guard(index=1):
+            conv2 = paddle.static.nn.conv2d(conv1,
+                                            num_filters=3,
+                                            filter_size=3,
+                                            bias_attr=False)
+            loss = paddle.mean(conv2)
+        self.fetch_list = [loss.name]
 
-    def test_cast(self):
-        cpu_outputs = self._test(False)
-        ipu_outputs = self._test(True)
+    def run_model(self, exec_mode):
+        ipu_strategy = paddle.static.IpuStrategy()
+        ipu_strategy.set_graph_config(num_ipus=2,
+                                      is_training=False,
+                                      enable_manual_shard=True)
+        ipu_strategy.set_pipelining_config(enable_pipelining=True,
+                                           batches_per_step=2)
+        self.run_op_test(exec_mode, ipu_strategy=ipu_strategy)
 
-        self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=1e-4))
+    def test(self):
+        self.build_model()
+        self.run_model(IPUOpTest.ExecutionMode.IPU_FP32)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_p_norm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_p_norm_op_ipu.py
new file mode 100644
index 00000000000..ec333ddff01
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_p_norm_op_ipu.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_op_attrs()
+
+    def set_op_attrs(self):
+        self.attrs = {"p": 2}
+
+    def set_feed(self):
+        data = np.random.uniform(size=[2, 3, 4])
+        self.feed_fp32 = {'x': data.astype(np.float32)}
+        self.feed_fp16 = {'x': data.astype(np.float16)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        x = paddle.nn.functional.normalize(x, **self.attrs)
+        self.fetch_list = [x.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+class TestCase1(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"axis": 1}
+
+
+class TestCase2(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"p": 3.5, "axis": 1, "epsilon": 1e-3}
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py
new file mode 100644
index 00000000000..02a488180aa
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_pad_op_ipu.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_op_attrs()
+
+    def set_feed(self):
+        data = np.random.uniform(size=[5, 4, 2, 3])
+        self.feed_fp32 = {'x': data.astype(np.float32)}
+        self.feed_fp16 = {'x': data.astype(np.float16)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [1, 2, 3, 4]}
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        pad = paddle.nn.functional.pad(x, **self.attrs)
+        self.fetch_list = [pad.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+@unittest.skip("Do not support `pad` as a tensor")
+class TestCase1(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {}
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        const_attrs = {
+            'name': 'y',
+            'shape': [4],
+            'dtype': 'int32',
+            'value': 2,
+        }
+        y = paddle.fluid.layers.fill_constant(**const_attrs)
+        pad = paddle.nn.functional.pad(x, pad=y)
+        self.fetch_list = [pad.name]
+
+
+class TestCase2(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [2, 5], "data_format": "NCL"}
+
+    def set_feed(self):
+        data = np.random.uniform(size=[4, 2, 3])
+        self.feed_fp32 = {'x': data.astype(np.float32)}
+        self.feed_fp16 = {'x': data.astype(np.float16)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+
+class TestCase3(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [2, 5, 2, 3, 6, 3], "data_format": "NCDHW"}
+
+    def set_feed(self):
+        data = np.random.uniform(size=[2, 3, 4, 2, 3])
+        self.feed_fp32 = {'x': data.astype(np.float32)}
+        self.feed_fp16 = {'x': data.astype(np.float16)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+
+class TestCase4(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [2, 2, 1, 1], "mode": "reflect"}
+
+
+@unittest.skip("replicate mode is not supported")
+class TestCase5(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [1, 2, 3, 4], "mode": "replicate"}
+
+
+@unittest.skip("circular mode is not supported")
+class TestCase6(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [1, 2], "data_format": "NLC"}
+
+
+@unittest.skip("Only support NCL, NCHW, NCDHW")
+class TestCase7(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [1, 2, 3, 4], "mode": "circular"}
+
+
+@unittest.skip("Only support NCL, NCHW, NCDHW")
+class TestCase8(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [1, 2, 3, 4], "data_format": "NHWC"}
+
+
+@unittest.skip("Only support NCL, NCHW, NCDHW")
+class TestCase9(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"pad": [1, 2, 3, 4, 1, 3], "data_format": "NDHWC"}
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_prelu_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_prelu_op_ipu.py
index b06b0dc96f1..0200cce0a33 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_prelu_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_prelu_op_ipu.py
@@ -61,7 +61,6 @@ class TestBase(IPUOpTest):
     def run_model(self, exec_mode):
         ipu_strategy = paddle.static.IpuStrategy()
         ipu_strategy.set_graph_config(is_training=self.is_training)
-        ipu_strategy.set_options({'onnx_dump_path': 'onnx_dump_path.onnx'})
         self.run_op_test(exec_mode, ipu_strategy=ipu_strategy)
 
     def test(self):
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py
index ffa3c6d1550..4cfbb9a5e0b 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py
@@ -148,5 +148,59 @@ class TestSum(TestMean):
         self.op = paddle.fluid.layers.reduce_sum
 
 
+class TestLogsumexp(TestMean):
+
+    def set_test_op(self):
+        self.op = paddle.logsumexp
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        if 'dim' in self.attrs:
+            self.attrs['axis'] = self.attrs['dim']
+            del self.attrs['dim']
+        if 'keep_dim' in self.attrs:
+            self.attrs['keepdim'] = self.attrs['keep_dim']
+            del self.attrs['keep_dim']
+        out = self.op(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+
+class TestAll(TestMean):
+
+    @property
+    def fp16_enabled(self):
+        return False
+
+    def set_data_feed0(self):
+        data = np.random.choice(a=[False, True], size=(2, 4))
+        self.feed_fp32 = {"in_0": data.astype(bool)}
+        self.set_feed_attr()
+
+    def set_data_feed1(self):
+        data = np.random.choice(a=[False, True], size=(2, 2, 2))
+        self.feed_fp32 = {"in_0": data.astype(bool)}
+        self.set_feed_attr()
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='bool')
+        out = self.op(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def set_test_op(self):
+        self.op = paddle.fluid.layers.reduce_all
+
+
+class TestAny(TestAll):
+
+    def set_test_op(self):
+        self.op = paddle.fluid.layers.reduce_any
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py
index 97b0c25f938..21021cd9f59 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_softmax_with_cross_entropy_op_ipu.py
@@ -106,5 +106,104 @@ class TestCase2(TestBase):
         }
 
 
+class TestCase3(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 5, 7])
+        label = np.random.randint(0, 7, [3, 5, 1], dtype='int64')
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "label": label.astype(np.int64)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "label": label.astype(np.int32)
+        }
+
+
+class TestCase4(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'soft_label': False,
+            'return_softmax': True,
+            'ignore_index': 1,
+        }
+
+    @IPUOpTest.static_graph
+    def build_model(self, on_ipu):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype="float32")
+        if on_ipu:
+            label = paddle.static.data(name=self.feed_list[1],
+                                       shape=self.feed_shape[1],
+                                       dtype='int32')
+        else:
+            label = paddle.static.data(name=self.feed_list[1],
+                                       shape=self.feed_shape[1],
+                                       dtype='int64')
+        loss, softmax = F.softmax_with_cross_entropy(x, label, **self.attrs)
+        self.fetch_list = [loss.name, softmax.name]
+
+    def run_model(self, exec_mode):
+        if self.is_ipu_mode(exec_mode):
+            self.feed_fp32['label'] = self.feed_fp32['label'].astype(np.int32)
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model(self.is_ipu_mode(m))
+                self.run_model(m)
+        self.check()
+
+
+class TestCase5(TestCase4):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'soft_label': False,
+            'return_softmax': True,
+            'ignore_index': 1,
+            'axis': 1,
+        }
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 5, 7, 11])
+        label = np.random.randint(0, 5, [3, 1, 7, 11], dtype='int64')
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "label": label.astype(np.int64)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "label": label.astype(np.int32)
+        }
+
+
+class TestCase6(TestCase4):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'soft_label': False,
+            'return_softmax': True,
+            'ignore_index': 1,
+            'axis': 2,
+        }
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 5, 7, 9, 11])
+        label = np.random.randint(0, 7, [3, 5, 1, 9, 11], dtype='int64')
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "label": label.astype(np.int64)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "label": label.astype(np.int32)
+        }
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_warpctc_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_warpctc_op_ipu.py
new file mode 100644
index 00000000000..8387b350155
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_warpctc_op_ipu.py
@@ -0,0 +1,122 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+import paddle.nn.functional as F
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_data_feed()
+        self.set_feed_attr()
+        self.set_op_attrs()
+
+    def set_training(self):
+        # ctcloss only supports training currently.
+        self.is_training = True
+        self.epoch = 1
+
+    def set_data_feed(self):
+        self.batch_size = 16
+        self.max_seq_length = 5
+        self.max_label_length = 3
+        self.num_classes = 5
+        self.logits_length = np.array([self.max_seq_length] * self.batch_size,
+                                      dtype=np.int64)
+        self.labels_length = np.array([self.max_label_length] * self.batch_size,
+                                      dtype=np.int64)
+        self.blank = self.num_classes - 1
+        self.norm_by_times = False
+
+        logits = np.random.uniform(
+            0.1, 1.0, [self.max_seq_length, self.batch_size, self.num_classes
+                       ]).astype("float32")
+        labels = np.random.randint(0,
+                                   self.num_classes - 1,
+                                   [self.batch_size, self.max_label_length],
+                                   dtype="int32")
+
+        self.feed_fp32 = {
+            "Logits": logits,
+            "Label": labels,
+            "input_length": self.logits_length.astype("int64"),
+            "label_length": self.labels_length.astype("int64"),
+        }
+        self.feed_fp16 = {
+            "Logits": logits.astype(np.float16),
+            "Label": labels,
+            "input_length": self.logits_length.astype("int64"),
+            "label_length": self.labels_length.astype("int64"),
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    def set_op_attrs(self):
+        self.attrs = {
+            "blank": self.blank,
+            "norm_by_times": self.norm_by_times,
+        }
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        data = paddle.static.data(name=self.feed_list[0],
+                                  shape=self.feed_shape[0],
+                                  dtype="float32")
+        logits = paddle.nn.Linear(self.num_classes,
+                                  self.num_classes,
+                                  bias_attr=False)(data)
+        labels = paddle.static.data(name=self.feed_list[1],
+                                    shape=self.feed_shape[1],
+                                    dtype='int32')
+        input_length = paddle.static.data(name=self.feed_list[2],
+                                          shape=self.feed_shape[2],
+                                          dtype='int64')
+        label_length = paddle.static.data(name=self.feed_list[3],
+                                          shape=self.feed_shape[3],
+                                          dtype='int64')
+        out = paddle.fluid.layers.warpctc(logits,
+                                          labels,
+                                          input_length=input_length,
+                                          label_length=label_length,
+                                          **self.attrs)
+        loss = paddle.mean(out)
+        adam = paddle.optimizer.Adam(learning_rate=1e-2)
+        adam.minimize(loss)
+        self.fetch_list = [loss.name, out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+if __name__ == "__main__":
+    unittest.main()
-- 
GitLab