diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index d1db8feb217528b8f9422cd7e6bbcb0771245603..39676b916e50470ac9774f3564b4bdc3a8fcb20f 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -229,6 +229,7 @@ function cmake_base() {
         -DWITH_CNCL=${WITH_CNCL:-OFF}
         -DWITH_XPU=${WITH_XPU:-OFF}
         -DWITH_MLU=${WITH_MLU:-OFF}
+        -DWITH_IPU=${WITH_IPU:-OFF}
         -DLITE_GIT_TAG=release/v2.10
         -DWITH_UNITY_BUILD=${WITH_UNITY_BUILD:-OFF}
         -DWITH_XPU_BKCL=${WITH_XPU_BKCL:-OFF}
@@ -280,6 +281,7 @@ EOF
         -DLITE_GIT_TAG=release/v2.10 \
         -DWITH_XPU=${WITH_XPU:-OFF} \
         -DWITH_MLU=${WITH_MLU:-OFF} \
+        -DWITH_IPU=${WITH_IPU:-OFF} \
         -DWITH_CNCL=${WITH_CNCL:-OFF} \
         -DXPU_SDK_ROOT=${XPU_SDK_ROOT:-""} \
         -DWITH_LITE=${WITH_LITE:-OFF} \
@@ -1283,6 +1285,8 @@ function card_test() {
         CUDA_DEVICE_COUNT=$(rocm-smi -i | grep GPU | wc -l)
     elif [ "${WITH_MLU}" == "ON" ];then
         CUDA_DEVICE_COUNT=1
+    elif [ "${WITH_IPU}" == "ON" ];then
+        CUDA_DEVICE_COUNT=1
     else
         CUDA_DEVICE_COUNT=$(nvidia-smi -L | wc -l)
     fi
@@ -2240,6 +2244,130 @@ set -ex
     fi
 }
 
+function parallel_test_base_ipu() {
+    mkdir -p ${PADDLE_ROOT}/build
+    cd ${PADDLE_ROOT}/build/python/paddle/fluid/tests/unittests/ipu
+    if [ ${WITH_TESTING:-ON} == "ON" ] ; then
+    cat <<EOF
+    ========================================
+    Running unit ipu tests ...
+    ========================================
+EOF
+        # ... (test discovery, card_test scheduling, and the retry loop that
+        # sets ut_actual_total_startTime_s and EXIT_CODE and appends timing
+        # lines to ${PADDLE_ROOT}/build/build_summary.txt are elided here) ...
+        ut_actual_total_endTime_s=`date +%s`
+        echo "ipipe_log_param_actual_TestCases_Total_Time: $[ $ut_actual_total_endTime_s - $ut_actual_total_startTime_s ]s" >> ${PADDLE_ROOT}/build/build_summary.txt
+        if [[ "$EXIT_CODE" != "0" ]]; then
+            show_ut_retry_result
+        fi
+set -ex
+    fi
+}
+
 function parallel_test() {
     mkdir -p ${PADDLE_ROOT}/build
     cd ${PADDLE_ROOT}/build
@@ -2257,6 +2385,8 @@ function parallel_test() {
         parallel_test_base_npu
     elif [ "$WITH_MLU" == "ON" ];then
         parallel_test_base_mlu
+    elif [ "$WITH_IPU" == "ON" ];then
+        parallel_test_base_ipu
     else
         parallel_test_base_cpu ${PROC_RUN:-1}
     fi
@@ -3022,6 +3152,11 @@ function main() {
         parallel_test
         check_coverage
         ;;
+      check_ipu_coverage)
+        cmake_gen_and_build ${PYTHON_ABI:-""} ${parallel_number}
+        parallel_test
+        check_coverage
+        ;;
       reuse_so_cicheck_py35)
         reuse_so_cache
         parallel_test
diff --git a/python/paddle/fluid/tests/unittests/ipu/CMakeLists.txt b/python/paddle/fluid/tests/unittests/ipu/CMakeLists.txt
index 959700ad743b40420200b56055354279386a9a7c..79a2430a161703348824d8e4e687bf85569c408a 100644
--- a/python/paddle/fluid/tests/unittests/ipu/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/ipu/CMakeLists.txt
@@ -4,5 +4,11 @@ if(WITH_IPU)
 
     foreach(TEST_OP ${TEST_OPS})
         py_test_modules(${TEST_OP} MODULES ${TEST_OP})
+        # set all UTs timeout to 200s
+        set_tests_properties(${TEST_OP} PROPERTIES TIMEOUT 200)
     endforeach(TEST_OP)
+
+    set_tests_properties(test_conv_op_ipu PROPERTIES TIMEOUT 300)
+    set_tests_properties(test_elemetwise_x_op_ipu PROPERTIES TIMEOUT 300)
+    set_tests_properties(test_reduce_x_op_ipu PROPERTIES TIMEOUT 600)
 endif()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_activation_x_op.py b/python/paddle/fluid/tests/unittests/ipu/test_activation_x_op.py
deleted file mode 100644
index 58a88c113fc0b6b82c1c58d50a1b0824cb530632..0000000000000000000000000000000000000000
--- a/python/paddle/fluid/tests/unittests/ipu/test_activation_x_op.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.nn.functional as F
-import paddle.optimizer
-import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
-
-
-@unittest.skipIf(not paddle.is_compiled_with_ipu(),
-                 "core is not compiled with IPU")
-class TestRelu(IPUOpTest):
-    def setUp(self):
-        self.set_atol()
-        self.set_training()
-        self.init_op()
-
-    def init_op(self):
-        self.op = paddle.fluid.layers.relu
-
-    def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-        ]
-
-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
-
-        with fluid.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
-                out = self.op(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
-                place = paddle.CPUPlace()
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if run_ipu:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IpuCompiler(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
-            return result[0]
-
-    def run_test_base(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-
-        self.assertTrue(res0.shape == res1.shape)
-
-    def test_case0(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
-        }
-        self.attrs = {}
-        self.set_feed_attr()
-        self.run_test_base()
-
-
-class TestTanh(TestRelu):
-    def init_op(self):
-        self.op = F.tanh
-
-
-class TestLog(TestRelu):
-    def init_op(self):
-        self.op = paddle.fluid.layers.log
-
-
-class TestSigmoid(TestRelu):
-    def init_op(self):
-        self.op = F.sigmoid
-
-
-class TestSqrt(TestRelu):
-    def init_op(self):
-        self.op = paddle.fluid.layers.sqrt
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py
index 1dab958c1ecbc806df94c651cf4a2d6cd82f3ddb..c640cd441f1b2589bcc3ffa466b865d1fb34c582 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_batch_norm_op_ipu.py
@@ -115,7 +115,7 @@ class TestBase(IPUOpTest):
 
 class TestCase1(TestBase):
     def set_atol(self):
-        self.atol = 1e-7
+        self.atol = 1e-6
         self.rtol = 1e-6
         self.atol_fp16 = 1e-3
         self.rtol_fp16 = 1e-3
@@ -129,7 +129,7 @@ class TestCase1(TestBase):
 
 class TestCase2(TestBase):
     def set_atol(self):
-        self.atol = 1e-7
+        self.atol = 1e-6
         self.rtol = 1e-6
         self.atol_fp16 = 1e-3
         self.rtol_fp16 = 1e-3
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_fp16_support.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_fp16_support.py
deleted file mode 100644
index aa6c05dc59a87f844c19912be484a4b007f0adfc..0000000000000000000000000000000000000000
--- a/python/paddle/fluid/tests/unittests/ipu/test_ipu_fp16_support.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
-import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
-
-
-@unittest.skipIf(not paddle.is_compiled_with_ipu(),
-                 "core is not compiled with IPU")
-class TestBase(IPUOpTest):
-    def setUp(self):
-        self.set_atol()
-        self.set_feed()
-        self.set_feed_attr()
-        self.set_attrs()
-
-    def set_feed(self):
-        np_data = np.random.uniform(low=-1, high=1, size=[1, 3, 100, 100])
-        self.feed_ipu = {"x": np_data.astype('float16')}
-        self.feed_cpu = {"x": np_data.astype('float32')}
-
-    def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed_cpu.values()]
-        self.feed_list = list(self.feed_cpu.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed_cpu.values()
-        ]
-
-    def set_attrs(self):
-        self.attrs = {}
-
-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
-
-        with fluid.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
-                conv1 = paddle.static.nn.conv2d(
-                    x, num_filters=3, filter_size=3, bias_attr=False)
-                conv2 = paddle.static.nn.conv2d(
-                    x, num_filters=3, filter_size=3, bias_attr=False)
-                add1 = conv1 + conv2
-                conv3 = paddle.static.nn.conv2d(
-                    add1, num_filters=8, filter_size=8, bias_attr=False)
-                out = paddle.fluid.layers.relu(conv3, **self.attrs)
-                fetch_list = [out.name]
-
-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
-                place = paddle.CPUPlace()
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            feed = self.feed_ipu if run_ipu else self.feed_cpu
-            if run_ipu:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=False)
-                ipu_strategy.SetHalfConfig(enable_fp16=True)
-                program = compiler.IPUCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                feed_list = self.feed_list
-                program = main_prog
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
-
-        self.assertTrue(res0.shape == res1.shape)
-        mae = np.mean(np.abs(res0.flatten() - res1.flatten()))
-        print("mae is ", mae)
-        self.assertTrue(mae < 0.001)
-
-
-if __name__ == "__main__":
-    unittest.main()
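
The new check_ipu_coverage entry point chains cmake_gen_and_build, parallel_test (which now dispatches to parallel_test_base_ipu when WITH_IPU=ON), and check_coverage. Below is a minimal sketch of driving it locally; only WITH_IPU, WITH_TESTING, and the check_ipu_coverage subcommand come from this diff, and the rest of the environment setup is assumed to follow the usual paddle_build.sh workflow:

    # Sketch only, not the canonical CI invocation.
    export WITH_IPU=ON        # consumed as -DWITH_IPU=${WITH_IPU:-OFF} in cmake_base()
    export WITH_TESTING=ON    # parallel_test_base_ipu() only runs its body when this is ON (the default)
    bash paddle/scripts/paddle_build.sh check_ipu_coverage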