diff --git a/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c9d3f29adf3c0674045e653572e892141b699c7
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_gelu_op_ipu.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.optimizer
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
+                                                          np_dtype_to_fluid_str)
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_feed_attr()
+        self.set_attrs()
+
+    def set_atol(self):
+        self.atol = 1e-3
+
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32')
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed.values()]
+        self.feed_list = list(self.feed.keys())
+        self.feed_dtype = [
+            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
+        ]
+
+    def set_attrs(self):
+        self.attrs = {"approximate": False}
+
+    def _test_base(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        SEED = self.SEED
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                x = paddle.static.data(
+                    name=self.feed_list[0],
+                    shape=self.feed_shape[0],
+                    dtype=self.feed_dtype[0])
+                out = paddle.fluid.layers.gelu(x, **self.attrs)
+
+                fetch_list = [out.name]
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            if run_ipu:
+                feed_list = self.feed_list
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = self.is_training
+                program = compiler.IPUCompiledProgram(
+                    main_prog,
+                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+            else:
+                program = main_prog
+
+            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            return result[0]
+
+    def test_base(self):
+        res0 = self._test_base(False)
+        res1 = self._test_base(True)
+
+        self.assertTrue(
+            np.allclose(
+                res0.flatten(), res1.flatten(), atol=self.atol))
+
+        self.assertTrue(res0.shape == res1.shape)
+
+
+@unittest.skip('approximate=True is not supported')
+class TestCase1(TestBase):
+    def set_attrs(self):
+        self.attrs = {"approximate": True}
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..1afc0cb9ed330d95de68e29743980e9920922beb
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_groupnorm_op_ipu.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.optimizer
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
+                                                          np_dtype_to_fluid_str)
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_feed_attr()
+        self.set_attrs()
+
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[1, 8, 10, 10]).astype('float32'),
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed.values()]
+        self.feed_list = list(self.feed.keys())
+        self.feed_dtype = [
+            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
+        ]
+
+    def set_attrs(self):
+        self.attrs = {
+            "groups": 8,
+            "epsilon": 1e-05,
+            "data_layout": 'NCHW',
+        }
+
+    def _test_base(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        SEED = self.SEED
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                x = paddle.static.data(
+                    name=self.feed_list[0],
+                    shape=self.feed_shape[0],
+                    dtype=self.feed_dtype[0])
+
+                if self.is_training:
+                    ch = self.feed_shape[0][1]
+                    conv1 = paddle.static.nn.conv2d(
+                        x, num_filters=ch, filter_size=3, bias_attr=False)
+                    scale = paddle.ParamAttr(trainable=True)
+                    bias = paddle.ParamAttr(trainable=True)
+                    out = paddle.fluid.layers.nn.group_norm(
+                        conv1, param_attr=scale, bias_attr=bias, **self.attrs)
+                else:
+                    scale = True
+                    bias = True
+                    out = paddle.fluid.layers.nn.group_norm(
+                        x, param_attr=scale, bias_attr=bias, **self.attrs)
+
+                if self.is_training:
+                    loss = paddle.mean(out)
+                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
+                    adam.minimize(loss)
+                    fetch_list = [loss.name]
+                else:
+                    fetch_list = [out.name]
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            if run_ipu:
+                feed_list = self.feed_list
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = self.is_training
+                program = compiler.IPUCompiledProgram(
+                    main_prog,
+                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+            else:
+                program = main_prog
+
+            if self.is_training:
+                result = []
+                for _ in range(self.epoch):
+                    loss_res = exe.run(program,
+                                       feed=self.feed,
+                                       fetch_list=fetch_list)
+                    result.append(loss_res[0])
+                return np.array(result)
+            else:
+                result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+                return result[0]
+
+    def test_base(self):
+        res0 = self._test_base(False)
+        res1 = self._test_base(True)
+
+        self.assertTrue(
+            np.allclose(
+                res0.flatten(), res1.flatten(), atol=self.atol))
+
+        self.assertTrue(res0.shape == res1.shape)
+
+
+class TestCase1(TestBase):
+    def set_attrs(self):
+        self.attrs = {
+            "groups": 4,
+            "epsilon": 1e-05,
+            "data_layout": 'NCHW',
+        }
+
+
+class TestTrainCase1(TestBase):
+    def set_training(self):
+        self.is_training = True
+        self.epoch = 10
+
+
+class TestTrainCase2(TestBase):
+    def set_atol(self):
+        self.atol = 1e-3
+
+    def set_attrs(self):
+        self.attrs = {
+            "groups": 4,
+            "epsilon": 1e-05,
+            "data_layout": 'NCHW',
+        }
+
+    def set_training(self):
+        self.is_training = True
+        self.epoch = 10
+
+
+# `group_norm(x, param_attr=False, bias_attr=False, **self.attrs)` is not supported
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py
new file mode 100644
index 0000000000000000000000000000000000000000..b50ed7bdbab52fd7d49b1bd202406930da109ced
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_instancenorm_op_ipu.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.optimizer
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
+                                                          np_dtype_to_fluid_str)
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_training()
+        self.set_feed()
+        self.set_feed_attr()
+        self.set_attrs()
+
+    def set_feed(self):
+        self.feed = {
+            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
+        }
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed.values()]
+        self.feed_list = list(self.feed.keys())
+        self.feed_dtype = [
+            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
+        ]
+
+    def set_attrs(self):
+        self.attrs = {"epsilon": 1e-05}
+
+    def _test_base(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        SEED = self.SEED
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                x = paddle.static.data(
+                    name=self.feed_list[0],
+                    shape=self.feed_shape[0],
+                    dtype=self.feed_dtype[0])
+
+                if self.is_training:
+                    ch = self.feed_shape[0][1]
+                    conv1 = paddle.static.nn.conv2d(
+                        x, num_filters=ch, filter_size=3, bias_attr=False)
+                    scale = paddle.ParamAttr(trainable=True)
+                    bias = paddle.ParamAttr(trainable=True)
+                    out = paddle.fluid.layers.nn.instance_norm(
+                        conv1, param_attr=scale, bias_attr=bias, **self.attrs)
+                else:
+                    scale = True
+                    bias = True
+                    out = paddle.fluid.layers.nn.instance_norm(
+                        x, param_attr=scale, bias_attr=bias, **self.attrs)
+
+                if self.is_training:
+                    loss = paddle.mean(out)
+                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
+                    adam.minimize(loss)
+                    fetch_list = [loss.name]
+                else:
+                    fetch_list = [out.name]
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            if run_ipu:
+                feed_list = self.feed_list
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = self.is_training
+                program = compiler.IPUCompiledProgram(
+                    main_prog,
+                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+            else:
+                program = main_prog
+
+            if self.is_training:
+                result = []
+                for _ in range(self.epoch):
+                    loss_res = exe.run(program,
+                                       feed=self.feed,
+                                       fetch_list=fetch_list)
+                    result.append(loss_res)
+                return np.array(result)
+            else:
+                result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+                return result[0]
+
+    def test_base(self):
+        res0 = self._test_base(False)
+        res1 = self._test_base(True)
+
+        self.assertTrue(
+            np.allclose(
+                res0.flatten(), res1.flatten(), atol=self.atol))
+
+        self.assertTrue(res0.shape == res1.shape)
+
+
+class TestTrainCase1(TestBase):
+    def set_training(self):
+        self.is_training = True
+        self.epoch = 10
+
+
+# `instance_norm(x, param_attr=False, bias_attr=False, **self.attrs)` is not supported
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_batchs_per_step_simple.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_batchs_per_step_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b549b306f0d379d46ed3597c116fbcbfabccbb4
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_batchs_per_step_simple.py
@@ -0,0 +1,90 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+
+paddle.enable_static()
+SEED = 2021
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestFunc(unittest.TestCase):
+    def _test_func(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+        np.random.seed(SEED)
+
+        bps = 5
+        n = 1 if run_ipu else -1
+        c, h, w = 3, 10, 10
+        np_image = np.random.uniform(size=[1 * bps, c, h, w]).astype(np.float32)
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                image = paddle.static.data(
+                    name='image', shape=[n, c, h, w], dtype='float32')
+                conv2d = paddle.static.nn.conv2d(
+                    image, num_filters=3, filter_size=3, bias_attr=False)
+
+                # the output shape of paddle.mean is [bps] on the IPU
+                # but [1] on the CPU, so an extra mean() would be needed:
+                # out = paddle.mean(conv2d)
+                out = conv2d
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            if run_ipu:
+                feed_list = [image.name]
+                fetch_list = [out.name]
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = False
+                ipu_strategy.batches_per_step = bps
+                program = compiler.IPUCompiledProgram(
+                    main_prog, ipu_strategy=ipu_strategy).compile(feed_list,
+                                                                  fetch_list)
+            else:
+                program = main_prog
+
+            result = exe.run(program,
+                             feed={image.name: np_image},
+                             fetch_list=[out])
+            return result[0]
+
+    def test_func(self):
+        ipu_res = self._test_func(True)
+        cpu_res = self._test_func(False)
+
+        if np.prod(ipu_res.shape) == np.prod(cpu_res.shape):
+            ipu_res = ipu_res.reshape(cpu_res.shape)
+
+        self.assertTrue(np.allclose(ipu_res, cpu_res, atol=1e-4))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_fp16_support.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_fp16_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..d135e5a586e7dffd667adc88179a8df830deb245
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_fp16_support.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.optimizer
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
+                                                          np_dtype_to_fluid_str)
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_feed()
+        self.set_feed_attr()
+        self.set_attrs()
+
+    def set_feed(self):
+        np_data = np.random.uniform(low=-1, high=1, size=[1, 3, 100, 100])
+        self.feed_ipu = {"x": np_data.astype('float16')}
+        self.feed_cpu = {"x": np_data.astype('float32')}
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_cpu.values()]
+        self.feed_list = list(self.feed_cpu.keys())
+        self.feed_dtype = [
+            np_dtype_to_fluid_str(x.dtype) for x in self.feed_cpu.values()
+        ]
+
+    def set_attrs(self):
+        self.attrs = {}
+
+    def _test_base(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        SEED = self.SEED
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                x = paddle.static.data(
+                    name=self.feed_list[0],
+                    shape=self.feed_shape[0],
+                    dtype=self.feed_dtype[0])
+                conv1 = paddle.static.nn.conv2d(
+                    x, num_filters=3, filter_size=3, bias_attr=False)
+                conv2 = paddle.static.nn.conv2d(
+                    x, num_filters=3, filter_size=3, bias_attr=False)
+                add1 = conv1 + conv2
+                conv3 = paddle.static.nn.conv2d(
+                    add1, num_filters=8, filter_size=8, bias_attr=False)
+                out = paddle.fluid.layers.relu(conv3, **self.attrs)
+                fetch_list = [out.name]
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(startup_prog)
+
+            feed = self.feed_ipu if run_ipu else self.feed_cpu
+            if run_ipu:
+                feed_list = self.feed_list
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.is_training = False
+                ipu_strategy.enable_fp16 = True
+                program = compiler.IPUCompiledProgram(
+                    main_prog,
+                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+            else:
+                feed_list = self.feed_list
+                program = main_prog
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
+            return result[0]
+
+    def test_base(self):
+        res0 = self._test_base(False)
+        res1 = self._test_base(True)
+
+        self.assertTrue(res0.shape == res1.shape)
+        mae = np.mean(np.abs(res0.flatten() - res1.flatten()))
+        print("mae is ", mae)
+        self.assertTrue(mae < 0.001)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_inference_model_io.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_inference_model_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8ab3f81e9d3d63c2fe460dd359e3a2a54e02b7d
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_inference_model_io.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import shutil
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+import paddle.optimizer
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestBase(IPUOpTest):
+    def setUp(self):
+        self.set_atol()
+        self.set_feed()
+        self.set_attrs()
+
+    def set_feed(self):
+        self.feed_shape = []
+        self.feed_shape.append([1, 3, 10, 10])
+
+        self.feed = {}
+        self.feed["in_0"] = np.random.uniform(
+            size=self.feed_shape[0]).astype(np.float32)
+
+        self.feed_list = list(self.feed.keys())
+
+    def set_attrs(self):
+        self.attrs = {}
+        self.attrs['steps'] = 100
+        self.attrs['save_at_step'] = 20
+        self.attrs['is_training'] = True
+        self.attrs['opt_type'] = 'sgd'
+        self.attrs['path'] = 'model'
+        self.attrs['model_name'] = 'test'
+
+    def _test_save(self):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED
+        generator = fluid.unique_name.UniqueNameGenerator()
+        self.full_name = '/'.join(
+            [self.attrs['path'], self.attrs['model_name']])
+
+        with fluid.unique_name.guard(generator):
+            with fluid.scope_guard(scope):
+                with paddle.static.program_guard(main_prog, startup_prog):
+                    x = paddle.static.data(
+                        name=self.feed_list[0],
+                        shape=self.feed_shape[0],
+                        dtype='float32')
+                    conv1 = paddle.static.nn.conv2d(
+                        x,
+                        num_filters=3,
+                        filter_size=3,
+                        bias_attr=False,
+                        name='conv2d')
+                    loss = paddle.mean(conv1)
+
+                    if self.attrs['is_training']:
+                        if self.attrs['opt_type'] == 'sgd':
+                            sgd = paddle.optimizer.SGD(learning_rate=1e-2)
+                            sgd.minimize(loss)
+                        elif self.attrs['opt_type'] == 'adam':
+                            adam = paddle.optimizer.Adam(learning_rate=1e-2)
+                            adam.minimize(loss)
+                        elif self.attrs['opt_type'] == 'lamb':
+                            lamb = paddle.optimizer.Lamb(learning_rate=1e-2)
+                            lamb.minimize(loss)
+                    fetch_list = [loss.name]
+
+                    place = paddle.IPUPlace()
+                    exe = paddle.static.Executor(place)
+                    exe.run(startup_prog)
+
+                    ipu_strategy = compiler.get_ipu_strategy()
+                    ipu_strategy.is_training = self.attrs['is_training']
+                    program = compiler.IPUCompiledProgram(
+                        main_prog, ipu_strategy=ipu_strategy).compile(
+                            self.feed_list, fetch_list)
+
+                    result = []
+                    for i in range(self.attrs['steps']):
+                        tmp = exe.run(program,
+                                      feed=self.feed,
+                                      fetch_list=fetch_list)
+                        result.append(tmp)
+
+                    paddle.static.save_inference_model(
+                        self.full_name, x, loss, exe,
+                        program=program.org_program)
+
+    def _test_load(self, run_ipu):
+        if run_ipu:
+            place = paddle.IPUPlace()
+        else:
+            place = paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
+
+        [inference_program, feed_target_names, fetch_targets] = (
+            paddle.static.load_inference_model(self.full_name, exe))
+
+        if run_ipu:
+            feed_list = feed_target_names
+            fetch_list = [fetch_targets[0].name]
+            ipu_strategy = compiler.get_ipu_strategy()
+            ipu_strategy.is_training = False
+            program = compiler.IPUCompiledProgram(
+                inference_program,
+                ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+        else:
+            program = inference_program
+
+        tmp = exe.run(program, feed=self.feed, fetch_list=[fetch_targets])
+
+        return tmp
+
+    def test_base(self):
+        self._test_save()
+        cpu_res = self._test_load(False)
+        ipu_res = self._test_load(True)
+
+        self.assertTrue(np.allclose(cpu_res, ipu_res, atol=self.atol))
+
+        shutil.rmtree(self.attrs['path'], True)
+
+
+class TestAdam(TestBase):
+    def set_attrs(self):
+        self.attrs = {}
+        self.attrs['steps'] = 100
+        self.attrs['is_training'] = True
+        self.attrs['opt_type'] = 'adam'
+        self.attrs['path'] = 'model'
+        self.attrs['model_name'] = 'test'
+
+
+class TestLamb(TestBase):
+    def set_attrs(self):
+        self.attrs = {}
+        self.attrs['steps'] = 100
+        self.attrs['is_training'] = True
+        self.attrs['opt_type'] = 'lamb'
+        self.attrs['path'] = 'model'
+        self.attrs['model_name'] = 'test'
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_model_pipeline.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_model_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..00fc0dd6633aed5b3dbc08a5170f476ba6d160ef
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_model_pipeline.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.compiler as compiler
+
+paddle.enable_static()
+SEED = 2021
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestCastNet(unittest.TestCase):
+    def _test(self, run_ipu=True):
+        scope = fluid.core.Scope()
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+        np.random.seed(SEED)
+
+        np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)
+
+        with fluid.scope_guard(scope):
+            with paddle.static.program_guard(main_prog, startup_prog):
+                image = paddle.static.data(
+                    name='image', shape=[1, 3, 10, 10], dtype='float32')
+                with fluid.ipu_shard(ipu_index=0):
+                    conv1 = paddle.static.nn.conv2d(
+                        image, num_filters=3, filter_size=3, bias_attr=False)
+                with fluid.ipu_shard(ipu_index=1):
+                    conv2 = paddle.static.nn.conv2d(
+                        conv1, num_filters=3, filter_size=3, bias_attr=False)
+                    loss = paddle.mean(conv2)
+
+            if run_ipu:
+                place = paddle.IPUPlace()
+            else:
+                place = paddle.CPUPlace()
+            executor = paddle.static.Executor(place)
+            executor.run(startup_prog)
+
+            if run_ipu:
+                feed_list = [image.name]
+                fetch_list = [loss.name]
+                ipu_strategy = compiler.get_ipu_strategy()
+                ipu_strategy.num_ipus = 2
+                ipu_strategy.is_training = False
+                ipu_strategy.enable_manual_shard = True
+                ipu_strategy.enable_pipelining = False
+                program = compiler.IPUCompiledProgram(
+                    main_prog,
+                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
+            else:
+                program = main_prog
+
+            loss_res = executor.run(program,
+                                    feed={"image": np_image},
+                                    fetch_list=[loss])
+            return loss_res
+
+    def test_cast(self):
+        cpu_outputs = self._test(False)
+        ipu_outputs = self._test(True)
+
+        self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=1e-4))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_pipeline.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..beab68553d723c5c2277861a74a8c7afaa4d2c38
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_pipeline.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+import paddle
+import paddle.fluid as fluid
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_ipu(),
+                 "core is not compiled with IPU")
+class TestIpuShard(unittest.TestCase):
+    def _test(self):
+        # build graph
+        a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
+        b = a + 2  # scale op (scale * x + bias), no ipu_stage set
+
+        with paddle.fluid.ipu_shard(ipu_stage=1):
+            c = b + 1  # scale op, ipu_stage: 1
+            with paddle.fluid.ipu_shard(ipu_stage=2):
+                d = c * 2  # scale op, ipu_stage: 2
+                with paddle.fluid.ipu_shard(ipu_stage=3):
+                    e = d + 3  # scale op, ipu_stage: 3
+                    with paddle.fluid.ipu_shard(ipu_stage=1):
+                        e = e + 3  # scale op, ipu_stage: 1
+                        with paddle.fluid.ipu_shard(ipu_stage=2):
+                            e = e + 3  # scale op, ipu_stage: 2
+
+        with paddle.fluid.ipu_shard(ipu_stage=1):
+            f = paddle.tensor.pow(e, 2.0)  # pow op, ipu_stage: 1
+
+        with paddle.fluid.ipu_shard(ipu_stage=2):
+            g = f - 1  # scale op, ipu_stage: 2
+
+        h = g + 1  # scale op, no ipu_stage set
+
+        ipu_index_list = []
+        main_prog = paddle.static.default_main_program()
+        for op in main_prog.global_block().ops:
+            if op.desc.has_attr("ipu_stage"):
+                ipu_index_list.append(op.desc.attr("ipu_stage"))
+
+        return ipu_index_list
+
+    def test_ipu_shard(self):
+        ipu_index_list = self._test()
+        expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
+
+        self.assertTrue(
+            np.allclose(
+                ipu_index_list, expected_ipu_index_list, atol=0))
+
+
+if __name__ == "__main__":
+    unittest.main()