diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 11ccd476b1b5918f94c35b9461f4728f2835932c..1f81afbed64d79421feba58b45d252a64272a3af 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -635,7 +635,7 @@ class IpuDynamicPatcher(object):
             if not isinstance(item, CacheKey):
                 raise ValueError(
                     'type(item) should be CacheKey, but received %s' %
-                    type_name(item))
+                    type(item).__name__)
             item_id = hash(item)
             self._recent_key = item_id
             if item_id not in self._caches or ipu_strategy.need_compile:
diff --git a/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py b/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
index 5f2a0d59bb8be9c80520bb7d5f5627ce5004bfa5..becaaa4173ae7c5b4c3d8356278cde02d90d2e90 100644
--- a/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
@@ -216,42 +216,54 @@ class IPUOpTest(IPUTest):
             raise ValueError("output_dict is empty")
         cpu_fp32 = output_dict[ExecutionMode.CPU_FP32]
         ipu_fp32 = output_dict[ExecutionMode.IPU_FP32]
-        cpu_fp32 = np.asarray(cpu_fp32).astype(np.float32).flatten()
-        ipu_fp32 = np.asarray(ipu_fp32).astype(np.float32).flatten()
-        pass_check = np.allclose(ipu_fp32,
-                                 cpu_fp32,
-                                 rtol=self.rtol,
-                                 atol=self.atol)
-        if not pass_check:
-            max_atol = np.abs(ipu_fp32 - cpu_fp32).max()
-            cpu_fp32_abs = np.abs(cpu_fp32)
-            cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
-            max_rtol = (np.abs(ipu_fp32 - cpu_fp32) / cpu_fp32_abs).max()
-            raise AssertionError(
-                f"ipu_fp32 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
-            )
-
-        if check_shape:
-            self.assertTrue(cpu_fp32.shape == ipu_fp32.shape)
-
-        if ExecutionMode.IPU_FP16 in output_dict.keys():
-            ipu_fp16 = output_dict[ExecutionMode.IPU_FP16]
-            ipu_fp16 = np.asarray(ipu_fp16).astype(np.float32).flatten()
-            pass_check = np.allclose(ipu_fp16,
-                                     cpu_fp32,
-                                     rtol=self.rtol_fp16,
-                                     atol=self.atol_fp16)
+        if len(cpu_fp32) != len(ipu_fp32):
+            raise ValueError("Output counts differ between IPU and CPU.")
+        for cpu_fp32_res, ipu_fp32_res in zip(cpu_fp32, ipu_fp32):
+            cpu_fp32_res = np.asarray(cpu_fp32_res).astype(np.float32).flatten()
+            ipu_fp32_res = np.asarray(ipu_fp32_res).astype(np.float32).flatten()
+            pass_check = np.allclose(ipu_fp32_res,
+                                     cpu_fp32_res,
+                                     rtol=self.rtol,
+                                     atol=self.atol)
             if not pass_check:
-                max_atol = np.abs(ipu_fp16 - cpu_fp32).max()
-                cpu_fp32_abs = np.abs(cpu_fp32)
+                max_atol = np.abs(ipu_fp32_res - cpu_fp32_res).max()
+                cpu_fp32_abs = np.abs(cpu_fp32_res)
                 cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
-                max_rtol = (np.abs(ipu_fp16 - cpu_fp32) / cpu_fp32_abs).max()
+                max_rtol = (np.abs(ipu_fp32_res - cpu_fp32_res) /
+                            cpu_fp32_abs).max()
                 raise AssertionError(
-                    f"ipu_fp16 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
+                    f"ipu_fp32 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
                 )
 
             if check_shape:
-                self.assertTrue(ipu_fp16.shape == cpu_fp32.shape)
+                self.assertTrue(cpu_fp32_res.shape == ipu_fp32_res.shape)
+
+        if ExecutionMode.IPU_FP16 in output_dict.keys():
+            ipu_fp16 = output_dict[ExecutionMode.IPU_FP16]
+            if len(cpu_fp32) != len(ipu_fp16):
+                raise ValueError(
+                    "Output counts differ between IPU and CPU.")
+            for cpu_fp32_res, ipu_fp16_res in zip(cpu_fp32, ipu_fp16):
+                cpu_fp32_res = np.asarray(cpu_fp32_res).astype(
+                    np.float32).flatten()
+                ipu_fp16_res = np.asarray(ipu_fp16_res).astype(
+                    np.float32).flatten()
+                pass_check = np.allclose(ipu_fp16_res,
+                                         cpu_fp32_res,
+                                         rtol=self.rtol_fp16,
+                                         atol=self.atol_fp16)
+                if not pass_check:
+                    max_atol = np.abs(ipu_fp16_res - cpu_fp32_res).max()
+                    cpu_fp32_abs = np.abs(cpu_fp32_res)
+                    cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
+                    max_rtol = (np.abs(ipu_fp16_res - cpu_fp32_res) /
+                                cpu_fp32_abs).max()
+                    raise AssertionError(
+                        f"ipu_fp16 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
+                    )
+
+                if check_shape:
+                    self.assertTrue(ipu_fp16_res.shape == cpu_fp32_res.shape)
 
     # Execution Mode
     class ExecutionMode(IntEnum):
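Reviewer note: the updated `check()` above now compares fetched outputs pairwise instead of flattening everything into a single array. A minimal, self-contained sketch of that per-output comparison (plain NumPy, outside the test harness; `compare_outputs` and its arguments are illustrative names, not part of the patch):

    import numpy as np

    def compare_outputs(cpu_outs, ipu_outs, rtol=1e-6, atol=1e-10):
        # Mirror of the new loop: each CPU/IPU output pair is flattened
        # and checked independently against the tolerances.
        if len(cpu_outs) != len(ipu_outs):
            raise ValueError("Output counts differ between IPU and CPU.")
        for cpu_res, ipu_res in zip(cpu_outs, ipu_outs):
            cpu_res = np.asarray(cpu_res).astype(np.float32).flatten()
            ipu_res = np.asarray(ipu_res).astype(np.float32).flatten()
            if not np.allclose(ipu_res, cpu_res, rtol=rtol, atol=atol):
                diff = np.abs(ipu_res - cpu_res)
                denom = np.abs(cpu_res)
                denom[denom == 0.0] = 1e-20  # guard against division by zero
                raise AssertionError(
                    f"check failed. max_atol is {diff.max()}, "
                    f"max_rtol is {(diff / denom).max()}")

    # e.g. two fetched outputs per device
    compare_outputs([np.ones(4), np.zeros(3)], [np.ones(4), np.zeros(3)])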
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_affine_channel_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_affine_channel_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..09a251585b381e12f06b503633dbc274269e1e97
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_affine_channel_op_ipu.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_data_feed()
+        self.set_feed_attr()
+        self.set_op_attrs()
+
+    @property
+    def fp16_enabled(self):
+        return False
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[1, 3, 32, 32])
+        self.feed_fp32 = {'data': data.astype(np.float32)}
+        self.feed_fp16 = {'data': data.astype(np.float16)}
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    def set_op_attrs(self):
+        self.attrs = {}
+        self.attrs['data_layout'] = 'NCHW'
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        data = paddle.static.data(name=self.feed_list[0],
+                                  shape=self.feed_shape[0],
+                                  dtype='float32')
+        input_scale = paddle.fluid.layers.create_parameter(
+            shape=[self.feed_shape[0][1]], dtype="float32")
+        input_bias = paddle.fluid.layers.create_parameter(
+            shape=[self.feed_shape[0][1]], dtype="float32")
+        out = paddle.fluid.layers.affine_channel(data,
+                                                 scale=input_scale,
+                                                 bias=input_bias)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+class TestCase1(TestBase):
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[2, 4, 64, 64])
+        self.feed_fp32 = {'data': data.astype(np.float32)}
+        self.feed_fp16 = {'data': data.astype(np.float16)}
+
+
+@unittest.skip("Only support NCHW")
+class TestNHWC(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {}
+        self.attrs['data_layout'] = 'NHWC'
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[2, 64, 64, 3])
+        self.feed_fp32 = {'data': data.astype(np.float32)}
+        self.feed_fp16 = {'data': data.astype(np.float16)}
+
+
+if __name__ == "__main__":
+    unittest.main()
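For context, `affine_channel` applies a per-channel scale and bias. In the NCHW layout the test above exercises, it is numerically equivalent to this NumPy sketch (illustrative only, not the Paddle kernel):

    import numpy as np

    def affine_channel_nchw(x, scale, bias):
        # out[n, c, h, w] = x[n, c, h, w] * scale[c] + bias[c]
        return x * scale.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)

    x = np.random.uniform(size=[1, 3, 32, 32]).astype(np.float32)
    scale = np.random.uniform(size=[3]).astype(np.float32)
    bias = np.random.uniform(size=[3]).astype(np.float32)
    assert affine_channel_nchw(x, scale, bias).shape == x.shape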
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_binary_cross_entropy_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_binary_cross_entropy_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..121755226ec3429bd42aaf53f4f24b615749663d
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_binary_cross_entropy_op_ipu.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+import paddle.nn.functional as F
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_data_feed()
+        self.set_feed_attr()
+        self.set_op_attrs()
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 4, 2, 2])
+        target = np.random.uniform(size=[3, 4, 2, 2])
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "target": target.astype(np.float32)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "target": target.astype(np.float16)
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'reduction': 'mean',
+        }
+
+    @IPUOpTest.static_graph
+    def build_model(self, on_ipu):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype="float32")
+        target = paddle.static.data(name=self.feed_list[1],
+                                    shape=self.feed_shape[1],
+                                    dtype='float32')
+        out = F.binary_cross_entropy(x, target, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model(self.is_ipu_mode(m))
+                self.run_model(m)
+        self.check()
+
+
+class TestCase1(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'reduction': 'sum',
+        }
+
+
+class TestCase2(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            'reduction': 'none',
+        }
+
+    def set_atol(self):
+        self.atol = 1e-10
+        self.rtol = 1e-6
+        self.atol_fp16 = 5e-2
+        self.rtol_fp16 = 2e-2
+
+
+if __name__ == "__main__":
+    unittest.main()
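As a reference for the three `reduction` modes exercised above, `F.binary_cross_entropy` computes roughly the following (NumPy sketch; the small `eps` is only there to keep the sketch numerically safe and is not part of the op's definition):

    import numpy as np

    def bce(x, target, reduction='mean', eps=1e-12):
        # Elementwise loss on probabilities x in (0, 1).
        loss = -(target * np.log(x + eps) +
                 (1 - target) * np.log(1 - x + eps))
        if reduction == 'mean':
            return loss.mean()
        if reduction == 'sum':
            return loss.sum()
        return loss  # reduction == 'none' keeps the elementwise shape

    x = np.random.uniform(0.01, 0.99, size=[3, 4, 2, 2])
    t = np.random.uniform(size=[3, 4, 2, 2])
    assert bce(x, t, 'none').shape == x.shape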
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c61685e4a5e30a581789ae18caa348ed085699f0
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_clip_op_ipu.py
@@ -0,0 +1,220 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_op_attrs()
+
+    def set_atol(self):
+        self.atol = 1e-6
+        self.rtol = 1e-6
+        self.atol_fp16 = 1e-3
+        self.rtol_fp16 = 1e-3
+
+    def set_feed(self):
+        data = np.random.uniform(size=[5, 5])
+        self.feed_fp32 = {'x': data.astype(np.float32)}
+        self.feed_fp16 = {'x': data.astype(np.float16)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    def set_op_attrs(self):
+        self.attrs = {}
+        self.attrs['min'] = 0.1
+        self.attrs['max'] = 3.4
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        x = paddle.clip(x, **self.attrs)
+        self.fetch_list = [x.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+class TestNoMin(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {}
+        self.attrs['max'] = 3.4
+
+
+class TestNoMax(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {}
+        self.attrs['min'] = 0.1
+
+
+class TestNoMinNoMax(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {}
+
+
+class TestMinMaxTensor(TestBase):
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+
+        min = paddle.fluid.layers.fill_constant(name="min",
+                                                shape=[1],
+                                                dtype='float32',
+                                                value=0.1)
+        max = paddle.fluid.layers.fill_constant(name="max",
+                                                shape=[1],
+                                                dtype='float32',
+                                                value=3.4)
+        x = paddle.clip(x, min=min, max=max)
+        self.fetch_list = [x.name]
+
+
+class TestMinTensor(TestBase):
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+
+        min = paddle.fluid.layers.fill_constant(name="min",
+                                                shape=[1],
+                                                dtype='float32',
+                                                value=0.1)
+        x = paddle.clip(x, min=min)
+        self.fetch_list = [x.name]
+
+
+class TestMaxTensor(TestBase):
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+
+        max = paddle.fluid.layers.fill_constant(name="max",
+                                                shape=[1],
+                                                dtype='float32',
+                                                value=3.4)
+        x = paddle.clip(x, max=max)
+        self.fetch_list = [x.name]
+
+
+class TestCombine1(TestBase):
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+
+        min = paddle.fluid.layers.fill_constant(name="min",
+                                                shape=[1],
+                                                dtype='float32',
+                                                value=0.1)
+        x = paddle.clip(x, min=min, max=3.4)
+        self.fetch_list = [x.name]
+
+
+class TestCombine2(TestBase):
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+
+        max = paddle.fluid.layers.fill_constant(name="max",
+                                                shape=[1],
+                                                dtype='float32',
+                                                value=3.4)
+        x = paddle.clip(x, min=0.1, max=max)
+        self.fetch_list = [x.name]
+
+
+class TestIntInput(TestBase):
+
+    def set_feed(self):
+        data = np.random.uniform(size=[5, 5])
+        self.feed_fp32 = {'x': data.astype(np.int32)}
+        self.feed_fp16 = {'x': data.astype(np.int32)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='int32')
+
+        x = paddle.clip(x, min=0.1, max=3.4)
+        self.fetch_list = [x.name]
+
+
+class TestIntMinMax(TestBase):
+
+    def set_feed(self):
+        data = np.random.uniform(size=[5, 5])
+        self.feed_fp32 = {'x': data.astype(np.int32)}
+        self.feed_fp16 = {'x': data.astype(np.int32)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='int32')
+        min = paddle.fluid.layers.fill_constant(name="min",
+                                                shape=[1],
+                                                dtype='int32',
+                                                value=1)
+        max = paddle.fluid.layers.fill_constant(name="max",
+                                                shape=[1],
+                                                dtype='int32',
+                                                value=3)
+        x = paddle.clip(x, min=min, max=max)
+        self.fetch_list = [x.name]
+
+
+if __name__ == "__main__":
+    unittest.main()
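The tensor-valued `min`/`max` variants above reduce to the same elementwise formula as the scalar ones; a quick NumPy equivalence check (illustrative names, outside the test harness):

    import numpy as np

    def clip(x, min_v=None, max_v=None):
        # Missing bounds are simply skipped, as in TestNoMin/TestNoMax.
        if min_v is not None:
            x = np.maximum(x, min_v)
        if max_v is not None:
            x = np.minimum(x, max_v)
        return x

    x = np.random.uniform(-1.0, 5.0, size=[5, 5]).astype(np.float32)
    assert (clip(x, 0.1, 3.4) == np.clip(x, 0.1, 3.4)).all()
    assert (clip(x, max_v=3.4) == np.minimum(x, 3.4)).all()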
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_conv2d_transpose_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_conv2d_transpose_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..64fdcc26636cf61183fe41adc713e70d51614cf3
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_conv2d_transpose_op_ipu.py
@@ -0,0 +1,162 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_op_attrs()
+
+    def set_atol(self):
+        self.atol = 1e-6
+        self.rtol = 1e-6
+        self.atol_fp16 = 1e-3
+        self.rtol_fp16 = 1e-3
+
+    def set_feed(self):
+        data = np.random.uniform(size=[1, 3, 8, 8])
+        self.feed_fp32 = {'in_0': data.astype(np.float32)}
+        self.feed_fp16 = {'in_0': data.astype(np.float16)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    def set_op_attrs(self):
+        self.attrs = {}
+        self.attrs['num_filters'] = 3
+        self.attrs['filter_size'] = 3
+        self.attrs['padding'] = 0
+        self.attrs['stride'] = 1
+        self.attrs['dilation'] = 1
+        self.attrs['bias_attr'] = False
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        x = paddle.static.nn.conv2d_transpose(x, **self.attrs)
+        self.fetch_list = [x.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+class TestCase1(TestBase):
+
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['stride'] = 2
+
+
+@unittest.skip("Only support dilation=1")
+class TestCase2(TestBase):
+
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['stride'] = 2
+        self.attrs['dilation'] = 2
+
+
+class TestCase3(TestBase):
+
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['padding'] = 2
+
+
+class TestCase4(TestBase):
+
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['padding'] = "SAME"
+
+
+class TestCase5(TestBase):
+
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['stride'] = 2
+        self.attrs['padding'] = "SAME"
+
+
+class TestCase6(TestBase):
+
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['padding'] = "VALID"
+
+
+class TestCase7(TestBase):
+
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['padding'] = "VALID"
+        self.attrs['stride'] = 2
+
+
+class TestCase8(TestBase):
+
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['filter_size'] = 4
+        self.attrs['stride'] = 2
+
+
+class TestCase9(TestBase):
+
+    # When bias_attr is not False, an Add Op is appended after the
+    # conv2d_transpose Op. When bias_attr is None, the bias defaults to 0.
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['bias_attr'] = None
+
+
+class TestCase10(TestBase):
+
+    # When output_size is not None, the filter_size is re-computed from
+    # output_size.
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['filter_size'] = None
+        self.attrs['output_size'] = [12, 12]
+
+
+class TestCase11(TestBase):
+
+    # Depthwise conv2d transpose
+    def set_op_attrs(self):
+        super().set_op_attrs()
+        self.attrs['groups'] = 3
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py
index 5a2485e251c9604444c1e496f8df6ab8e280e9fe..8fe7ee53ca2a85b7b778e54a4f2c2b0474f20a88 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_conv_op_ipu.py
@@ -108,7 +108,7 @@ class TestCase4(TestBase):
 
 
 class TestCase5(TestBase):
-
+    # Depthwise conv2d
     def set_op_attrs(self):
         super().set_op_attrs()
         self.attrs['groups'] = 3
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py
index ffd4368c089b588de3430d455a7d45717aeba509..5c456e2f4c3315bf195583425165987fa23de439 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_cross_entropy2_op_ipu.py
@@ -116,5 +116,35 @@ class TestCase3(TestBase):
         }
 
 
+class TestCase4(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 5, 7])
+        label = np.random.randint(0, 7, [3, 5, 1], dtype='int64')
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "label": label.astype(np.int64)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "label": label.astype(np.int32)
+        }
+
+
+class TestCase5(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 5, 6, 7])
+        label = np.random.randint(0, 7, [3, 5, 6], dtype='int64')
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "label": label.astype(np.int64)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "label": label.astype(np.int32)
+        }
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py
index 75cd3c92322abd2463f2fa8f36ad8eabec88850e..99cb47394ff5ec1e511739c97587fe642d9a3800 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_cumsum_op_ipu.py
@@ -86,5 +86,35 @@ class TestCase3(TestBase):
         self.attrs = {"exclusive": True, "reverse": True}
 
 
+class TestCase4(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[1, 128])
+        self.feed_fp32 = {"x": x.astype(np.int32)}
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype="int32")
+        out = paddle.fluid.layers.cumsum(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+
+class TestCase5(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[1, 128])
+        self.feed_fp32 = {"x": x.astype(np.int64)}
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype="int64")
+        out = paddle.fluid.layers.cumsum(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+
 if __name__ == "__main__":
     unittest.main()
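On the `output_size` note in test_conv2d_transpose_op_ipu.py above: for the stride/padding/dilation combinations these tests use, the transposed-conv shape relation is out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1, which is how the framework can re-derive the kernel size when `output_size` is pinned. A small sanity check of that relation (illustrative helper, not the Paddle kernel):

    def deconv_out_size(in_size, kernel, stride=1, padding=0, dilation=1):
        # Transposed-conv output size along one spatial dimension.
        return (in_size - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1

    # TestBase: 8x8 input, kernel 3, stride 1, padding 0 -> 10x10 output.
    assert deconv_out_size(8, 3) == 10
    # TestCase10 pins output_size=[12, 12]; with stride 1 and padding 0
    # this implies a re-derived kernel of 12 - (8 - 1) = 5.
    assert deconv_out_size(8, 5) == 12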
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_data_norm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_data_norm_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..94225660f4d599ebe2c5f3532d9b5e543c5c3d07
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_data_norm_op_ipu.py
@@ -0,0 +1,130 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_op_attrs()
+
+    def set_op_attrs(self):
+        self.attrs = {}
+
+    def set_feed(self):
+        data = np.random.uniform(size=[32, 100])
+        self.feed_fp32 = {'x': data.astype(np.float32)}
+        self.feed_fp16 = {'x': data.astype(np.float16)}
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        x = paddle.static.nn.data_norm(input=x, **self.attrs)
+        self.fetch_list = [x.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+class TestCase1(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"in_place": True}
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        x = paddle.static.nn.data_norm(input=x, **self.attrs)
+        x = x + 1
+        self.fetch_list = [x.name]
+
+
+@unittest.skip("in_place=True is not supported when testing a single data_norm Op")
+class TestCase2(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"in_place": True}
+
+
+class TestCase3(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"data_layout": "NHWC"}
+
+
+class TestCase4(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"epsilon": 0.001}
+
+
+class TestCase5(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"do_model_average_for_mean_and_var": True}
+
+
+class TestCase6(TestBase):
+    # If enable_scale_and_shift=True, the scale and bias values must be
+    # set via `param_attr`.
+    def set_op_attrs(self):
+        self.attrs = {
+            "param_attr": {
+                "scale_w": 0.5,
+                "bias": 0.1
+            },
+            "enable_scale_and_shift": True
+        }
+
+
+class TestCase7(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {
+            "param_attr": {
+                "batch_size": 1e3,
+                "batch_sum": 0.1,
+                "batch_square": 1e3,
+                "scale_w": 0.5,
+                "bias": 0.1
+            },
+            "enable_scale_and_shift": True
+        }
+
+
+if __name__ == "__main__":
+    unittest.main()
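For orientation on TestCase7's `param_attr` keys: at inference time `data_norm` normalizes with the accumulated batch statistics, roughly mean = batch_sum / batch_size and scale = sqrt(batch_size / batch_square), then applies `scale_w`/`bias` when `enable_scale_and_shift` is on. A sketch under that assumption (treat the formulas as illustrative, not authoritative):

    import numpy as np

    def data_norm_infer(x, batch_size, batch_sum, batch_square,
                        scale_w=None, bias=None):
        # Normalize with running statistics, then optionally apply the
        # learned scale/shift from param_attr.
        mean = batch_sum / batch_size
        scale = np.sqrt(batch_size / batch_square)
        out = (x - mean) * scale
        if scale_w is not None:
            out = out * scale_w + bias
        return out

    x = np.random.uniform(size=[32, 100]).astype(np.float32)
    out = data_norm_infer(x, 1e3, 0.1, 1e3, scale_w=0.5, bias=0.1)
    assert out.shape == x.shape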
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dist_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dist_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c84e8ce9bebadf61eca78cb878e68f1c91cbf9da
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_dist_op_ipu.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.static
+from op_test_ipu import IPUOpTest
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_data_feed()
+        self.set_feed_attr()
+        self.set_op_attrs()
+
+    def set_data_feed(self):
+        data_x = np.random.uniform(size=[8, 1, 6, 1])
+        data_y = np.random.uniform(size=[7, 1, 5])
+        self.feed_fp32 = {
+            "x": data_x.astype(np.float32),
+            "y": data_y.astype(np.float32)
+        }
+        self.feed_fp16 = {
+            "x": data_x.astype(np.float16),
+            "y": data_y.astype(np.float16)
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
+
+    def set_op_attrs(self):
+        self.attrs = {"p": 2}
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype='float32')
+        y = paddle.static.data(name=self.feed_list[1],
+                               shape=self.feed_shape[1],
+                               dtype='float32')
+        out = paddle.dist(x, y, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
+
+
+class TestCase1(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"p": 0}
+
+
+class TestCase2(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"p": float("inf")}
+
+
+class TestCase3(TestBase):
+
+    def set_op_attrs(self):
+        self.attrs = {"p": float("-inf")}
+
+
+if __name__ == "__main__":
+    unittest.main()
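The `p` values covered by TestCase1-3 correspond to the usual p-norm family applied to the broadcasted difference z = x - y; a NumPy sketch of `paddle.dist` semantics (illustrative, not the Paddle kernel):

    import numpy as np

    def dist(x, y, p=2):
        z = np.abs(x - y)  # broadcasting: [8,1,6,1] vs [7,1,5] -> [8,7,6,5]
        if p == 0:
            return float(np.count_nonzero(z))  # number of non-zero entries
        if p == float("inf"):
            return float(z.max())
        if p == float("-inf"):
            return float(z.min())
        return float((z ** p).sum() ** (1.0 / p))

    x = np.random.uniform(size=[8, 1, 6, 1])
    y = np.random.uniform(size=[7, 1, 5])
    # p=2 agrees with the flattened Euclidean norm of the difference.
    assert np.isclose(dist(x, y), np.linalg.norm(x - y))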