From 3db6facbe0275d8c24ba067f6f8a54e7cd364b3f Mon Sep 17 00:00:00 2001
From: jianghaicheng
Date: Tue, 14 Dec 2021 11:39:49 +0800
Subject: [PATCH] ipu_commit_tests p6 (#38090)

---
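Note: every op test in this patch follows the same pattern: build one static
graph, run it once on CPU and once on IPU, and assert that the fetched
results match within a tolerance. A minimal sketch of that shared harness,
assuming a Paddle build compiled with IPU support; `relu` below is a
hypothetical stand-in for the op under test, not code from this patch:

    import numpy as np
    import paddle
    import paddle.fluid as fluid
    import paddle.fluid.compiler as compiler
    import paddle.static

    paddle.enable_static()

    def run_model(run_ipu):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = 2021
        startup_prog.random_seed = 2021
        feed = {"x": np.random.uniform(size=[2, 3]).astype('float32')}
        with fluid.scope_guard(fluid.core.Scope()):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
                out = paddle.fluid.layers.relu(x)  # stand-in for the op under test
                fetch_list = [out.name]
            place = paddle.IPUPlace() if run_ipu else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            if run_ipu:
                # compile the graph for the IPU, as the tests below do
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = False
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(
                        list(feed.keys()), fetch_list)
            else:
                program = main_prog
            return exe.run(program, feed=feed, fetch_list=fetch_list)[0]

    # the tests then assert, e.g.:
    # np.allclose(run_model(False), run_model(True), atol=...)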
 .../tests/unittests/ipu/test_ipu_place.py     |  51 ++++
 .../tests/unittests/ipu/test_ipu_shard.py     |  70 ++++++
 .../tests/unittests/ipu/test_ipu_strategy.py  |  57 +++++
 .../unittests/ipu/test_layernorm_op_ipu.py    | 199 ++++++++++++++++
 .../unittests/ipu/test_log_softmax_op_ipu.py  | 110 +++++++++
 .../unittests/ipu/test_lookuptable_op_ipu.py  | 137 +++++++++++
 .../tests/unittests/ipu/test_lr_sheduelr.py   | 101 ++++++++
 .../tests/unittests/ipu/test_matmul_op_ipu.py | 217 ++++++++++++++++++
 8 files changed, 942 insertions(+)
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_ipu_place.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_ipu_shard.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_ipu_strategy.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_log_softmax_op_ipu.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_lr_sheduelr.py
 create mode 100644 python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py

diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_place.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_place.py
new file mode 100644
index 00000000000..48ab046deb3
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_place.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+sys.path.append("..")
+import paddle
+import paddle.fluid as fluid
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestIpuPlace(unittest.TestCase):
+    def test_ipu_place(self):
+        num_devices = fluid.core.get_ipu_device_count()
+        self.assertGreater(num_devices, 0)
+
+        for i in range(num_devices):
+            place = paddle.IPUPlace()
+            p = fluid.core.Place()
+            p.set_place(place)
+            self.assertTrue(p.is_ipu_place())
+
+    def test_ipu_set_device(self):
+        num_devices = fluid.core.get_ipu_device_count()
+        self.assertGreater(num_devices, 0)
+
+        for i in range(num_devices):
+            paddle.set_device('ipu')
+            device = paddle.get_device()
+            self.assertTrue(device == "ipus:{{0-{}}}".format(num_devices - 1))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard.py
new file mode 100644
index 00000000000..368556d8b2f
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+import paddle
+import paddle.fluid as fluid
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestIpuShard(unittest.TestCase):
+    def _test(self):
+        # build graph
+        a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
+        b = a + 2  # scale : scale * x + bias, ipu_index : no
+
+        with paddle.fluid.ipu_shard(ipu_index=1):
+            c = b + 1  # scale, ipu_index : 1
+            with paddle.fluid.ipu_shard(ipu_index=2):
+                d = c * 2  # scale, ipu_index : 2
+            with paddle.fluid.ipu_shard(ipu_index=3):
+                e = d + 3  # scale, ipu_index : 3
+                with paddle.fluid.ipu_shard(ipu_index=1):
+                    e = e + 3  # scale, ipu_index : 1
+                    with paddle.fluid.ipu_shard(ipu_index=2):
+                        e = e + 3  # scale, ipu_index : 2
+
+        with paddle.fluid.ipu_shard(ipu_index=1):
+            f = paddle.tensor.pow(e, 2.0)  # pow, ipu_index : 1
+
+        with paddle.fluid.ipu_shard(ipu_index=2):
+            g = f - 1  # scale, ipu_index : 2
+
+        h = g + 1  # scale, ipu_index : no
+
+        ipu_index_list = []
+        main_prog = paddle.static.default_main_program()
+        for op in main_prog.global_block().ops:
+            if op.desc.has_attr("ipu_index"):
+                ipu_index_list.append(op.desc.attr("ipu_index"))
+
+        return ipu_index_list
+
+    def test_ipu_shard(self):
+        ipu_index_list = self._test()
+        expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
+        self.assertTrue(
+            np.allclose(
+                ipu_index_list, expected_ipu_index_list, atol=0))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_strategy.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_strategy.py
new file mode 100644
index 00000000000..741ca8784bb
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_strategy.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+
+paddle.enable_static()
+SEED = 2021
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestConvNet(unittest.TestCase):
+    def test_training(self):
+        ipu_strategy = compiler.get_ipu_strategy()
+
+        assert ipu_strategy.num_ipus == 1, "Default num_ipus must be 1"
+        assert ipu_strategy.is_training == True, "Default is_training is True"
+        assert ipu_strategy.enable_pipelining == False, \
+            "Default enable_pipelining is False"
+        assert ipu_strategy.enable_manual_shard == False, \
+            "Default enable_manual_shard is False"
+
+        ipu_strategy.num_ipus = 2
+        assert ipu_strategy.num_ipus == 2, "Set num_ipus Failed"
+
+        ipu_strategy.is_training = False
+        assert ipu_strategy.is_training == False, "Set is_training Failed"
+
+        ipu_strategy.enable_pipelining = True
+        assert ipu_strategy.enable_pipelining == True, \
+            "Set enable_pipelining Failed"
+
+        ipu_strategy.enable_manual_shard = True
+        assert ipu_strategy.enable_manual_shard == True, \
+            "Set enable_manual_shard Failed"
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py
new file mode 100644
index 00000000000..043bc8ad362
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_layernorm_op_ipu.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.optimizer
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
+                                                          np_dtype_to_fluid_str)
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_feed_attr()
+        self.set_attrs()
+
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed.values()]
+        self.feed_list = list(self.feed.keys())
+        self.feed_dtype = [
+            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
+        ]
+
+    def set_attrs(self):
+        self.attrs = {
+            "scale": True,
+            "shift": True,
+            "begin_norm_axis": 1,
+            "epsilon": 1e-05,
+        }
+
+    def _test_base(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        SEED = self.SEED
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                x = paddle.static.data(
+                    name=self.feed_list[0],
+                    shape=self.feed_shape[0],
+                    dtype=self.feed_dtype[0])
+
+                if self.is_training:
+                    ch = self.feed_shape[0][1]
+                    conv1 = paddle.static.nn.conv2d(
+                        x, num_filters=ch, filter_size=3, bias_attr=False)
+                    scale = paddle.ParamAttr(trainable=True)
+                    bias = paddle.ParamAttr(trainable=True)
+                    out = paddle.fluid.layers.nn.layer_norm(
+                        conv1, param_attr=scale, bias_attr=bias, **self.attrs)
+                else:
+                    # scale = True
+                    # bias = True
+                    scale = self.attrs['scale']
+                    bias = self.attrs['shift']
+                    out = paddle.fluid.layers.nn.layer_norm(
+                        x, param_attr=scale, bias_attr=bias, **self.attrs)
+
+                if self.is_training:
+                    loss = paddle.mean(out)
+                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
+                    adam.minimize(loss)
+                    fetch_list = [loss.name]
+                else:
+                    fetch_list = [out.name]
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            if run_ipu:
+                feed_list = self.feed_list
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = self.is_training
+                program = compiler.IPUCompiledProgram(
+                    main_prog,
+                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+            else:
+                program = main_prog
+
+            if self.is_training:
+                result = []
+                for _ in range(self.epoch):
+                    loss_res = exe.run(program,
+                                       feed=self.feed,
+                                       fetch_list=fetch_list)
+                    result.append(loss_res[0])
+                return np.array(result)
+            else:
+                result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+                return result[0]
+
+    def test_base(self):
+        res0 = self._test_base(False)
+        res1 = self._test_base(True)
+
+        self.assertTrue(
+            np.allclose(
+                res0.flatten(), res1.flatten(), atol=self.atol))
+
+        self.assertTrue(res0.shape == res1.shape)
+
+
+@unittest.skip('raise error')
+class TestCase1(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "scale": False,
+            "shift": True,
+            "begin_norm_axis": 1,
+            "epsilon": 1e-05,
+        }
+
+
+@unittest.skip('raise error')
+class TestCase2(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "scale": True,
+            "shift": False,
+            "begin_norm_axis": 1,
+            "epsilon": 1e-05,
+        }
+
+
+class TestCase3(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "scale": True,
+            "shift": True,
+            "begin_norm_axis": 2,
"begin_norm_axis": 2, + "epsilon": 1e-05, + } + + +class TestTrainCase1(TestBase): + def set_atol(self): + self.atol = 1e-3 + + def set_training(self): + self.is_training = True + self.epoch = 10 + + +class TestTrainCase2(TestBase): + def set_atol(self): + self.atol = 1e-3 + + def set_attrs(self): + self.attrs = { + "scale": True, + "shift": True, + "begin_norm_axis": 2, + "epsilon": 1e-05, + } + + def set_training(self): + self.is_training = True + self.epoch = 10 + + +# not support `layer_norm(x, param_attr=False, bias_attr=False, **self.attrs)` + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_log_softmax_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_log_softmax_op_ipu.py new file mode 100644 index 00000000000..6f85c4f381e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ipu/test_log_softmax_op_ipu.py @@ -0,0 +1,110 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.compiler as compiler +import paddle.optimizer +import paddle.static +from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, + np_dtype_to_fluid_str) +import paddle.nn.functional as F + +paddle.enable_static() + + +@unittest.skipIf(not paddle.is_compiled_with_ipu(), + "core is not compiled with IPU") +class TestBase(IPUOpTest): + def setUp(self): + self.set_atol() + self.set_training() + self.set_feed() + self.set_feed_attr() + self.set_attrs() + + def set_feed(self): + self.feed = { + "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32') + } + + def set_feed_attr(self): + self.feed_shape = [x.shape for x in self.feed.values()] + self.feed_list = list(self.feed.keys()) + self.feed_dtype = [ + np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() + ] + + def set_attrs(self): + self.attrs = {"axis": -1} + + def _test_base(self, run_ipu=True): + scope = fluid.core.Scope() + main_prog = paddle.static.Program() + startup_prog = paddle.static.Program() + SEED = self.SEED + main_prog.random_seed = SEED + startup_prog.random_seed = SEED + + with fluid.scope_guard(scope): + with paddle.static.program_guard(main_prog, startup_prog): + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0]) + out = F.log_softmax(x, **self.attrs) + + fetch_list = [out.name] + + if run_ipu: + place = paddle.IPUPlace() + else: + place = paddle.CPUPlace() + exe = paddle.static.Executor(place) + exe.run(startup_prog) + + if run_ipu: + feed_list = self.feed_list + ipu_strategy = compiler.get_ipu_strategy() + ipu_strategy.is_training = self.is_training + program = compiler.IPUCompiledProgram( + main_prog, + ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + else: + program = main_prog + + result = exe.run(program, feed=self.feed, fetch_list=fetch_list) + return result[0] + + def test_base(self): + res0 = self._test_base(False) + res1 = 
self._test_base(True) + + self.assertTrue( + np.allclose( + res0.flatten(), res1.flatten(), atol=self.atol)) + + self.assertTrue(res0.shape == res1.shape) + + +class TestCase1(TestBase): + def set_attrs(self): + self.attrs = {"axis": 1} + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py new file mode 100644 index 00000000000..2443541c799 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py @@ -0,0 +1,137 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.compiler as compiler +import paddle.optimizer +import paddle.static +from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, + np_dtype_to_fluid_str) + +paddle.enable_static() + + +@unittest.skipIf(not paddle.is_compiled_with_ipu(), + "core is not compiled with IPU") +class TestBase(IPUOpTest): + def setUp(self): + self.set_atol() + self.set_training() + self.set_attrs() + + def set_feed_attr(self): + self.feed_shape = [x.shape for x in self.feed.values()] + self.feed_list = list(self.feed.keys()) + self.feed_dtype = [ + np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() + ] + + def set_attrs(self): + self.attrs = { + "size": [128, 16], + "is_sparse": False, + "is_distributed": False, + "padding_idx": -1, + "dtype": 'float32' + } + + def _test_base(self, run_ipu=True): + scope = fluid.core.Scope() + main_prog = paddle.static.Program() + startup_prog = paddle.static.Program() + SEED = self.SEED + main_prog.random_seed = SEED + startup_prog.random_seed = SEED + + if run_ipu: + self.feed = { + "x": np.array( + [[[1], [3]], [[2], [4]], [[4], [127]]]).astype(np.int32) + } + else: + self.feed = { + "x": np.array( + [[[1], [3]], [[2], [4]], [[4], [127]]]).astype(np.int64) + } + + self.set_feed_attr() + + with fluid.scope_guard(scope): + with paddle.static.program_guard(main_prog, startup_prog): + x = paddle.static.data( + name=self.feed_list[0], + shape=self.feed_shape[0], + dtype=self.feed_dtype[0]) + out = paddle.fluid.layers.embedding(x, **self.attrs) + + if self.is_training: + loss = paddle.mean(out) + adam = paddle.optimizer.Adam(learning_rate=1e-2) + adam.minimize(loss) + fetch_list = [loss.name] + else: + fetch_list = [out.name] + + if run_ipu: + place = paddle.IPUPlace() + else: + place = paddle.CPUPlace() + exe = paddle.static.Executor(place) + exe.run(startup_prog) + + if run_ipu: + feed_list = self.feed_list + ipu_strategy = compiler.get_ipu_strategy() + ipu_strategy.is_training = self.is_training + program = compiler.IPUCompiledProgram( + main_prog, + ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) + else: + program = main_prog + + if self.is_training: + result = [] + for _ in range(self.epoch): + loss_res = exe.run(program, + feed=self.feed, + 
+
+        self.assertTrue(
+            np.allclose(
+                res0.flatten(), res1.flatten(), atol=self.atol))
+
+        self.assertTrue(res0.shape == res1.shape)
+
+
+class TestCase1(TestBase):
+    def set_attrs(self):
+        self.attrs = {"axis": 1}
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py
new file mode 100644
index 00000000000..2443541c799
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.optimizer
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
+                                                          np_dtype_to_fluid_str)
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_attrs()
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed.values()]
+        self.feed_list = list(self.feed.keys())
+        self.feed_dtype = [
+            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
+        ]
+
+    def set_attrs(self):
+        self.attrs = {
+            "size": [128, 16],
+            "is_sparse": False,
+            "is_distributed": False,
+            "padding_idx": -1,
+            "dtype": 'float32'
+        }
+
+    def _test_base(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        SEED = self.SEED
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+
+        if run_ipu:
+            self.feed = {
+                "x": np.array(
+                    [[[1], [3]], [[2], [4]], [[4], [127]]]).astype(np.int32)
+            }
+        else:
+            self.feed = {
+                "x": np.array(
+                    [[[1], [3]], [[2], [4]], [[4], [127]]]).astype(np.int64)
+            }
+
+        self.set_feed_attr()
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                x = paddle.static.data(
+                    name=self.feed_list[0],
+                    shape=self.feed_shape[0],
+                    dtype=self.feed_dtype[0])
+                out = paddle.fluid.layers.embedding(x, **self.attrs)
+
+                if self.is_training:
+                    loss = paddle.mean(out)
+                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
+                    adam.minimize(loss)
+                    fetch_list = [loss.name]
+                else:
+                    fetch_list = [out.name]
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            if run_ipu:
+                feed_list = self.feed_list
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = self.is_training
+                program = compiler.IPUCompiledProgram(
+                    main_prog,
+                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+            else:
+                program = main_prog
+
+            if self.is_training:
+                result = []
+                for _ in range(self.epoch):
+                    loss_res = exe.run(program,
+                                       feed=self.feed,
+                                       fetch_list=fetch_list)
+                    result.append(loss_res[0])
+                return np.array(result)
+            else:
+                result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+                return result[0]
+
+    def test_base(self):
+        res0 = self._test_base(False)
+        res1 = self._test_base(True)
+
+        self.assertTrue(
+            np.allclose(
+                res0.flatten(), res1.flatten(), atol=self.atol))
+
+        self.assertTrue(res0.shape == res1.shape)
+
+
+class TestTrainCase1(TestBase):
+    def set_training(self):
+        self.is_training = True
+        self.epoch = 10
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduelr.py b/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduelr.py
new file mode 100644
index 00000000000..0aac2344b3c
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduelr.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+from paddle.optimizer.lr import LRScheduler
+
+paddle.enable_static()
+SEED = 2021
+
+
+class LR_New(LRScheduler):
+    def __init__(self, learning_rate=1.0, last_epoch=-1, verbose=False):
+        super(LR_New, self).__init__(learning_rate, last_epoch, verbose)
+
+    def get_lr(self):
+        self.base_lr = self.base_lr + 1
+        self.last_epoch = self.last_epoch + 1
+        return self.base_lr
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestConvNet(unittest.TestCase):
+    def _test(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+        np.random.seed(SEED)
+
+        np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                image = paddle.static.data(
+                    name='image', shape=[1, 3, 10, 10], dtype='float32')
+                conv1 = paddle.static.nn.conv2d(
+                    image, num_filters=3, filter_size=3, bias_attr=False)
+                loss = paddle.mean(conv1)
+
+                sgd = paddle.optimizer.SGD(learning_rate=LR_New())
+                sgd.minimize(loss)
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            if run_ipu:
+                feed_list = [image.name]
+                fetch_list = [loss.name]
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = True
+                program = compiler.IPUCompiledProgram(
+                    main_prog, ipu_strategy=ipu_strategy).compile(feed_list,
+                                                                  fetch_list)
+            else:
+                program = main_prog
+
+            result = []
+            for epoch in range(100):
+                if hasattr(program, "lr_sheduler"):
+                    program.lr_sheduler.step()
+                loss_res = exe.run(program,
+                                   feed={image.name: np_image},
+                                   fetch_list=[loss])
+                result.append(loss_res)
+
+            return np.array(result)
+
+    def test_training(self):
+        # cpu and ipu dimension mismatch, cpu:(100, 1, 1), ipu:(100, 1)
+        ipu_loss = self._test(True).flatten()
+        cpu_loss = self._test(False).flatten()
+
+        self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-4))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py
new file mode 100644
index 00000000000..7133a76c607
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_matmul_op_ipu.py
@@ -0,0 +1,217 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.optimizer
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
+                                                          np_dtype_to_fluid_str)
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_feed_attr()
+        self.set_attrs()
+
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[2, 3]).astype('float32'),
+            "y": np.random.uniform(size=[3, 2]).astype('float32'),
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed.values()]
+        self.feed_list = list(self.feed.keys())
+        self.feed_dtype = [
+            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
+        ]
+
+    def set_attrs(self):
+        self.attrs = {
+            "transpose_x": False,
+            "transpose_y": False,
+            "alpha": 1.0,
+        }
+
+    def _test_base(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        SEED = self.SEED
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                x = paddle.static.data(
+                    name=self.feed_list[0],
+                    shape=self.feed_shape[0],
+                    dtype=self.feed_dtype[0])
+                y = paddle.static.data(
+                    name=self.feed_list[1],
+                    shape=self.feed_shape[1],
+                    dtype=self.feed_dtype[1])
+                out = paddle.fluid.layers.matmul(x, y, **self.attrs)
+
+                fetch_list = [out.name]
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            if run_ipu:
+                feed_list = self.feed_list
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = self.is_training
+                program = compiler.IPUCompiledProgram(
+                    main_prog,
+                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+            else:
+                program = main_prog
+
+            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            return result[0]
+
+    def test_base(self):
+        res0 = self._test_base(False)
+        res1 = self._test_base(True)
+
+        self.assertTrue(
+            np.allclose(
+                res0.flatten(), res1.flatten(), atol=self.atol))
+
+        self.assertTrue(res0.shape == res1.shape)
+
+
+class TestCase1(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "transpose_x": True,
+            "transpose_y": True,
+            "alpha": 1.0,
+        }
+
+
+class TestCase2(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "transpose_x": True,
+            "transpose_y": True,
+            "alpha": 3.14,
+        }
+
+
+class TestCase3(TestBase):
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[5, 4, 2, 3]).astype('float32'),
+            "y": np.random.uniform(size=[5, 4, 3, 2]).astype('float32'),
+        }
+
+
+class TestCase4(TestBase):
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[4, 2, 3]).astype('float32'),
+            "y": np.random.uniform(size=[4, 3, 2]).astype('float32'),
+        }
+
+
+class TestCase5(TestBase):
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[4, 2, 3]).astype('float32'),
+            "y": np.random.uniform(size=[3, 2]).astype('float32'),
+        }
+
+
+class TestCase6(TestBase):
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[3]).astype('float32'),
+            "y": np.random.uniform(size=[3]).astype('float32'),
+        }
+
+
+@unittest.skip("not supported")
+class TestCase6_2(TestCase6):
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[3]).astype('float32'),
+            "y": np.random.uniform(size=[3]).astype('float32'),
+        }
+
+    def set_attrs(self):
+        self.attrs = {
+            "transpose_x": True,
+            "transpose_y": True,
+            "alpha": 1.0,
+        }
+
+
+class TestCase7(TestBase):
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[3, 1]).astype('float32'),
+            "y": np.random.uniform(size=[1, 2]).astype('float32'),
+        }
+
+
+@unittest.skip("not supported")
+class TestCase7_2(TestBase):
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[3]).astype('float32'),
+            "y": np.random.uniform(size=[2]).astype('float32'),
+        }
+        # equivalent to
+        # self.feed = {
+        #     "x": np.random.uniform(size=[3, 1]).astype('float32'),
+        #     "y": np.random.uniform(size=[1, 2]).astype('float32'),
+        # }
+
+    def set_attrs(self):
+        self.attrs = {
+            "transpose_x": True,
+            "transpose_y": True,
+            "alpha": 1.0,
+        }
+
+
+@unittest.skip("dim > 4 is not supported")
+class TestCase8(TestBase):
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[6, 5, 4, 2, 3]).astype('float32'),
+            "y": np.random.uniform(size=[6, 5, 4, 3, 2]).astype('float32'),
+        }
+
+
+if __name__ == "__main__":
+    unittest.main()
--
GitLab
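Note: test_ipu_shard.py and test_ipu_strategy.py above exercise the two
halves of manual sharding separately: annotating ops with an ipu_index, and
the strategy flags that control device count, sharding, and pipelining. A
minimal sketch of how the two presumably combine, again assuming an
IPU-enabled Paddle build (an illustration only, not code from this patch):

    import paddle
    import paddle.fluid as fluid
    import paddle.fluid.compiler as compiler

    paddle.enable_static()

    x = paddle.static.data(name='x', shape=[None, 1], dtype='float32')
    with paddle.fluid.ipu_shard(ipu_index=1):
        y = x + 1  # this op carries the attribute ipu_index = 1
    with paddle.fluid.ipu_shard(ipu_index=2):
        z = y * 2  # this op carries the attribute ipu_index = 2

    ipu_strategy = compiler.get_ipu_strategy()
    ipu_strategy.num_ipus = 2                # one device per shard
    ipu_strategy.enable_manual_shard = True  # place ops by their ipu_index
    ipu_strategy.enable_pipelining = True    # pipeline execution across shards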