# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Check that paddle.mean produces the same result on IPU and CPU."""

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_attrs()

    def set_feed(self):
        # One float32 input named "in_0" of shape [1, 3, 10, 10].
        self.feed_shape = [[1, 3, 10, 10]]
        self.feed = {
            "in_0": np.random.uniform(size=self.feed_shape[0]).astype(
                np.float32)
        }
        self.feed_list = list(self.feed.keys())

    def set_attrs(self):
        # Default: reduce over all axes, drop the reduced dimensions.
        self.attrs = {'axis': None, 'keepdim': False}

    def _test_base(self, run_ipu=True):
        """Build the mean program and run it on IPU or CPU; return the output."""
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        # Seed both programs so the two runs are deterministic and comparable.
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                out = paddle.mean(x, **self.attrs)

            fetch_list = [out.name]

            place = paddle.IPUPlace() if run_ipu else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog, ipu_strategy=ipu_strategy).compile(
                        self.feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
        return result[0]

    def test_base(self):
        # IPU run first, then the CPU reference.
        res0 = self._test_base(True)
        res1 = self._test_base(False)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))


class TestCase1(TestBase):
    def set_attrs(self):
        self.attrs = {'axis': 1, 'keepdim': False}


class TestCase2(TestBase):
    def set_attrs(self):
        self.attrs = {'axis': 2, 'keepdim': False}


class TestCase3(TestBase):
    def set_attrs(self):
        self.attrs = {'axis': 2, 'keepdim': True}


class TestCase4(TestBase):
    def set_attrs(self):
        self.attrs = {'axis': None, 'keepdim': True}


if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Check paddle.fluid.layers.mul gives identical results on IPU and CPU."""

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[2, 5]).astype('float32'),
            "y": np.random.uniform(size=[5, 3]).astype('float32'),
        }

    def set_feed_attr(self):
        # Metadata derived from the feed dict, in insertion order.
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {
            "x_num_col_dims": 1,
            "y_num_col_dims": 1,
        }

    def _test_base(self, run_ipu=True):
        """Build the mul program and run it on the requested device."""
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype=self.feed_dtype[1])
                out = paddle.fluid.layers.mul(x, y, **self.attrs)

            fetch_list = [out.name]

            place = paddle.IPUPlace() if run_ipu else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog, ipu_strategy=ipu_strategy).compile(
                        self.feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
        return result[0]

    def test_base(self):
        # CPU reference first, then the IPU run.
        res0 = self._test_base(False)
        res1 = self._test_base(True)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    """Rank-3 x flattened over its first two dims."""

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 2, 5]).astype('float32'),
            "y": np.random.uniform(size=[5, 3]).astype('float32'),
        }

    def set_attrs(self):
        self.attrs = {
            "x_num_col_dims": 2,
            "y_num_col_dims": 1,
        }


class TestCase2(TestBase):
    """High-rank operands flattened on both sides."""

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[3, 4, 2, 9]).astype('float32'),
            "y": np.random.uniform(size=[3, 6, 1, 2, 3]).astype('float32'),
        }

    def set_attrs(self):
        self.attrs = {
            'x_num_col_dims': 2,
            'y_num_col_dims': 2,
        }


if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Check average pool2d gives identical results on IPU and CPU."""

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
        }

    def set_feed_attr(self):
        # Metadata derived from the feed dict, in insertion order.
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        # Baseline pooling config; subclasses tweak one knob at a time.
        self.attrs = {
            "pool_size": 3,
            "pool_type": 'avg',
            "pool_stride": 1,
            "pool_padding": 0,
            "global_pooling": False,
            "ceil_mode": False,
            "exclusive": True,
            "data_format": 'NCHW',
        }

    def _test_base(self, run_ipu=True):
        """Build the pool2d program and run it on the requested device."""
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                out = paddle.fluid.layers.pool2d(x, **self.attrs)

            fetch_list = [out.name]

            place = paddle.IPUPlace() if run_ipu else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog, ipu_strategy=ipu_strategy).compile(
                        self.feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
        return result[0]

    def test_base(self):
        # CPU reference first, then the IPU run.
        res0 = self._test_base(False)
        res1 = self._test_base(True)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_size'] = 3


class TestCase1_2(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_size'] = [3, 1]


class TestCase2(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_stride'] = 2


class TestCase2_2(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_stride'] = [2, 1]


class TestCase3(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_padding'] = [1, 1]


class TestCase3_2(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_padding'] = [1, 1, 2, 2]


@unittest.skip('the results has a positional offset')
class TestCase3_3(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_padding'] = [1, 2, 1, 1]


@unittest.skip('paddle output has nan')
class TestCase3_4(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_size'] = 1
        self.attrs['pool_padding'] = 1


class TestCase4(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['global_pooling'] = True


class TestCase5(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['ceil_mode'] = True


class TestCase6(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['exclusive'] = False


if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Check max pool2d gives identical results on IPU and CPU."""

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
        }

    def set_feed_attr(self):
        # Metadata derived from the feed dict, in insertion order.
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        # Baseline pooling config; subclasses tweak one knob at a time.
        self.attrs = {
            "pool_size": 3,
            "pool_type": 'max',
            "pool_stride": 1,
            "pool_padding": 0,
            "global_pooling": False,
            "ceil_mode": False,
            "exclusive": True,
            "data_format": 'NCHW',
        }

    def _test_base(self, run_ipu=True):
        """Build the pool2d program and run it on the requested device."""
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                out = paddle.fluid.layers.pool2d(x, **self.attrs)

            fetch_list = [out.name]

            place = paddle.IPUPlace() if run_ipu else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog, ipu_strategy=ipu_strategy).compile(
                        self.feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
        return result[0]

    def test_base(self):
        # CPU reference first, then the IPU run.
        res0 = self._test_base(False)
        res1 = self._test_base(True)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_size'] = 3


class TestCase1_2(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_size'] = [3, 1]


class TestCase2(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_stride'] = 2


class TestCase2_2(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_stride'] = [2, 1]


class TestCase3(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_padding'] = [1, 1]


class TestCase3_2(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_padding'] = [1, 1, 2, 2]


@unittest.skip('auto_pad is not currently supported')
class TestCase3_3(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_padding'] = 'VALID'


@unittest.skip('auto_pad is not currently supported')
class TestCase3_4(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['pool_padding'] = 'SAME'


class TestCase4(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['global_pooling'] = True


class TestCase5(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['ceil_mode'] = True


class TestCase6(TestBase):
    def set_attrs(self):
        super().set_attrs()
        self.attrs['exclusive'] = False


if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Check paddle.fluid.layers.pow gives identical results on IPU and CPU.

    Subclasses override ``set_feed``/``set_attrs`` and, when the op must be
    wired differently (e.g. a tensor-valued exponent), ``_build_out`` —
    previously TestCase1 duplicated the whole ``_test_base`` body just to
    declare a second input variable.
    """

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 2, 2]).astype('float32'),
        }

    def set_feed_attr(self):
        # Metadata derived from the feed dict, in insertion order.
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def set_attrs(self):
        self.attrs = {"factor": 2.0}

    def _build_out(self, inputs):
        """Hook: build the op under test from the declared input variables."""
        return paddle.fluid.layers.pow(inputs[0], **self.attrs)

    def _test_base(self, run_ipu=True):
        """Build the program once and execute it on IPU or CPU."""
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                # Declare one static.data var per feed entry, in feed order,
                # so subclasses with extra inputs need no duplicated runner.
                inputs = [
                    paddle.static.data(
                        name=name, shape=shape, dtype=dtype)
                    for name, shape, dtype in zip(
                        self.feed_list, self.feed_shape, self.feed_dtype)
                ]
                out = self._build_out(inputs)

            fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
        return result[0]

    def test_base(self):
        # CPU reference first, then the IPU run.
        res0 = self._test_base(False)
        res1 = self._test_base(True)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)


class TestCase1(TestBase):
    """Pass the exponent as a tensor input instead of the factor attribute."""

    def set_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 2, 2]).astype('float32'),
            "y": np.array([2.0]).astype('float32'),
        }

    def set_attrs(self):
        self.attrs = {}

    def _build_out(self, inputs):
        # inputs[1] is the "y" feed: a one-element factor tensor.
        return paddle.fluid.layers.pow(inputs[0], factor=inputs[1],
                                       **self.attrs)


if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
                                                          np_dtype_to_fluid_str)

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestMean(IPUOpTest):
    """Compare a reduce op (reduce_mean by default) between IPU and CPU.

    Subclasses swap the op under test via ``init_op``; the test_case methods
    each pick a feed shape and a dim/keep_dim combination.
    """

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.init_op()

    def init_op(self):
        self.op = paddle.fluid.layers.reduce_mean

    def set_feed_attr(self):
        # Metadata derived from the feed dict, in insertion order.
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def _test_base(self, run_ipu=True):
        """Build the reduce program and run it on the requested device."""
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                out = self.op(x, **self.attrs)

            fetch_list = [out.name]

            place = paddle.IPUPlace() if run_ipu else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IPUCompiledProgram(
                    main_prog, ipu_strategy=ipu_strategy).compile(
                        self.feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
        return result[0]

    def run_test_base(self):
        # IPU run first, then the CPU reference.
        res0 = self._test_base(True)
        res1 = self._test_base(False)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

    def set_feed0(self):
        # Rank-2 input.
        self.feed = {
            "in_0": np.random.uniform(size=[2, 4]).astype(np.float32)
        }
        self.set_feed_attr()

    def set_feed1(self):
        # Rank-3 input.
        self.feed = {
            "in_0": np.random.uniform(size=[2, 2, 2]).astype(np.float32)
        }
        self.set_feed_attr()

    def set_attr0(self):
        # Full reduction over every axis, reduced dims dropped.
        self.attrs = {'dim': None, 'keep_dim': False}

    def test_case0(self):
        self.set_feed0()
        self.set_attr0()
        self.run_test_base()

    def test_case1(self):
        self.set_feed0()
        self.set_attr0()
        self.attrs['dim'] = 0
        self.run_test_base()

    def test_case2(self):
        self.set_feed0()
        self.set_attr0()
        self.attrs['dim'] = -1
        self.run_test_base()

    def test_case3(self):
        self.set_feed0()
        self.set_attr0()
        self.attrs['dim'] = 1
        self.run_test_base()

    def test_case4(self):
        self.set_feed0()
        self.attrs = {'dim': 1, 'keep_dim': True}
        self.run_test_base()

    def test_case5(self):
        self.set_feed1()
        self.attrs = {'dim': [1, 2], 'keep_dim': False}
        self.run_test_base()

    def test_case6(self):
        self.set_feed1()
        self.attrs = {'dim': [0, 1], 'keep_dim': False}
        self.run_test_base()

    def test_case7(self):
        self.set_feed1()
        self.attrs = {'dim': [0, 1], 'keep_dim': True}
        self.run_test_base()


class TestMax(TestMean):
    def init_op(self):
        self.op = paddle.fluid.layers.reduce_max


class TestMin(TestMean):
    def init_op(self):
        self.op = paddle.fluid.layers.reduce_min


class TestProd(TestMean):
    def init_op(self):
        self.op = paddle.fluid.layers.reduce_prod


class TestSum(TestMean):
    def init_op(self):
        self.op = paddle.fluid.layers.reduce_sum


if __name__ == "__main__":
    unittest.main()