Unverified commit 1db188f3, authored by Allen Guo, committed by GitHub

[IPU] update ipu unittests p0 (#39707)

* update ipu UTs part0

* rename UT

* sync api changes

* update uts for new api

* make use_ipumodel() a classmethod
Parent 0c3f7fbc
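
The migration is mechanical across the files below: `ipu_strategy.SetGraphConfig(...)` becomes `ipu_strategy.set_graph_config(...)`, `SetPipeliningConfig` becomes `set_pipelining_config`, and `paddle.fluid.compiler.IPUCompiledProgram` becomes `paddle.static.IpuCompiledProgram`; most tests also gain an `IPU_POPART_FP16` execution mode. A minimal sketch of the new-style compile path, assembled from calls that appear in this diff (the toy relu graph is my own illustration; actually running it requires a Paddle build with IPU support):

import paddle
import paddle.static

paddle.enable_static()

main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.static.data(name='x', shape=[1, 3, 10, 10], dtype='float32')
    out = paddle.fluid.layers.relu(x)

ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=False)     # was SetGraphConfig(...)
ipu_strategy.set_precision_config(enable_fp16=True)  # new IPU_POPART_FP16 path
program = paddle.static.IpuCompiledProgram(          # was compiler.IPUCompiledProgram
    main_prog, ipu_strategy=ipu_strategy).compile(['x'], [out.name])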
@@ -12,17 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
import unittest

import numpy as np
from enum import Enum

import paddle
import paddle.static

map_np_dtype_to_fluid_dtype = {
    'bool': "bool",
@@ -36,6 +33,19 @@ map_np_dtype_to_fluid_dtype = {
}


class ExecutionMode(Enum):
    CPU_FP32 = 1
    IPU_FP32 = 2
    # enable_fp16 through ipu_strategy.enable_fp16
    IPU_POPART_FP16 = 3

    def __lt__(self, other):
        return self.value < other.value

    def __gt__(self, other):
        return self.value > other.value


def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
    return map_np_dtype_to_fluid_dtype[dtype.name]

@@ -43,14 +53,16 @@ def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
class IPUOpTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Get random seeds
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()

        cls.SEED = 2021
        np.random.seed(cls.SEED)
        random.seed(cls.SEED)

        # Enable paddle static graph mode
        paddle.enable_static()

    @classmethod
    def tearDownClass(cls):
@@ -58,14 +70,47 @@ class IPUOpTest(unittest.TestCase):
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)

    @classmethod
    def use_ipumodel(cls):
        if 'POPLAR_IPUMODEL' not in os.environ:
            return False
        else:
            flag = os.environ['POPLAR_IPUMODEL']
            if flag.upper() in ['1', "TRUE"]:
                return True
            # be explicit about the fallthrough: any other value disables it
            return False

    def set_atol(self):
        self.atol = 1e-10
        self.rtol = 1e-6
        self.atol_fp16 = 1e-3
        self.rtol_fp16 = 1e-3

    def set_training(self):
        self.is_training = False
        self.epoch = 1

    def check(self, outputs, check_shape=False):
        cpu_fp32 = outputs[ExecutionMode.CPU_FP32]
        ipu_fp32 = outputs[ExecutionMode.IPU_FP32]
        max_diff = np.abs(cpu_fp32 - ipu_fp32).max()
        fp32_flag = np.allclose(
            cpu_fp32, ipu_fp32, rtol=self.rtol, atol=self.atol)
        self.assertTrue(fp32_flag, "max diff is %f" % (max_diff))

        if check_shape:
            self.assertTrue(cpu_fp32.shape == ipu_fp32.shape)

        ipu_popart_fp16 = None
        if ExecutionMode.IPU_POPART_FP16 in outputs.keys():
            ipu_popart_fp16 = outputs[ExecutionMode.IPU_POPART_FP16]
            max_diff = np.abs(ipu_popart_fp16.astype(np.float32) -
                              cpu_fp32).max()
            fp16_flag = np.allclose(
                ipu_popart_fp16.astype(np.float32),
                cpu_fp32,
                rtol=self.rtol_fp16,
                atol=self.atol_fp16)
            self.assertTrue(fp16_flag, "max diff is %f" % (max_diff))

            if check_shape:
                self.assertTrue(ipu_popart_fp16.shape == cpu_fp32.shape)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.nn.functional as F
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode,
                                                          IPUOpTest)


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestRelu(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_test_op()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()

    @property
    def fp16_enabled(self):
        return True

    def set_test_op(self):
        self.op = paddle.fluid.layers.relu
        self.op_attrs = {}

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}
        self.feed_fp16 = {'in_0': data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')

                out = self.op(x, **self.op_attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)


class TestTanh(TestRelu):
    def set_test_op(self):
        self.op = F.tanh
        self.op_attrs = {}


class TestLog(TestRelu):
    def set_test_op(self):
        self.op = paddle.fluid.layers.log
        self.op_attrs = {}


class TestSigmoid(TestRelu):
    def set_test_op(self):
        self.op = F.sigmoid
        self.op_attrs = {}


class TestSqrt(TestRelu):
    def set_test_op(self):
        self.op = paddle.fluid.layers.sqrt
        self.op_attrs = {}


if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode,
                                                          IPUOpTest)


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        return True

    def set_data_feed(self):
        data = np.random.uniform(size=[10, 1000])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())
        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

    def set_op_attrs(self):
        self.attrs = {"axis": -1}

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')

                out = paddle.fluid.layers.argmax(x, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0].astype(np.int32)

    def test_base(self):
        output_dict_fp32 = {}
        output_dict_fp16 = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            if mode > ExecutionMode.IPU_FP32:
                output_dict_fp16[mode] = self._test_base(mode).flatten()
            else:
                output_dict_fp32[mode] = self._test_base(mode).flatten()

        self.check(output_dict_fp32)


class TestCase1(TestBase):
    def set_op_attrs(self):
        self.attrs = {"axis": 0}


if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()

    @property
    def fp16_enabled(self):
        return True

    def set_data_feed(self):
        data = np.random.uniform(size=[2, 3, 1])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}
        self.feed_fp16 = {'in_0': data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')

                assign = paddle.assign(x)
                out = paddle.fluid.layers.elementwise_add(assign, assign)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)


if __name__ == "__main__":
    unittest.main()
@@ -16,13 +16,8 @@ import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -31,78 +26,89 @@ class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()

    @property
    def fp16_enabled(self):
        return True

    def set_atol(self):
        self.atol = 2e-6
        self.rtol = 1e-5
        self.atol_fp16 = 1e-2
        self.rtol_fp16 = 1e-3

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 128, 128])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}
        self.feed_fp16 = {'in_0': data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')

                conv1 = paddle.static.nn.conv2d(
                    x, num_filters=3, filter_size=3, bias_attr=False)
                conv2 = paddle.static.nn.conv2d(
                    conv1, num_filters=3, filter_size=3, bias_attr=False)
                conv3 = paddle.static.nn.conv2d(
                    conv2, num_filters=3, filter_size=3, bias_attr=False)
                conv4 = paddle.static.nn.conv2d(
                    conv3, num_filters=3, filter_size=3, bias_attr=False)

                fetch_list = [conv4.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                ipu_strategy.set_options({'need_avg_shard': True})
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)


if __name__ == "__main__":
...
@@ -16,13 +16,9 @@ import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode,
                                                          IPUOpTest)


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -31,76 +27,100 @@ class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        return True

    def set_atol(self):
        self.atol = 1e-6
        self.rtol = 1e-5
        self.atol_fp16 = 1e-2
        self.rtol_fp16 = 1e-3

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}
        self.feed_fp16 = {'in_0': data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['is_test'] = False
        self.attrs['data_layout'] = 'NCHW'
        self.attrs['in_place'] = False

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')

                conv1 = paddle.static.nn.conv2d(
                    x, num_filters=3, filter_size=3, bias_attr=False)
                out = paddle.fluid.layers.batch_norm(conv1, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)


class TestCase1(TestBase):
    def set_atol(self):
        self.atol = 1e-7
        self.rtol = 1e-6
        self.atol_fp16 = 1e-3
        self.rtol_fp16 = 1e-3

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['is_test'] = True
        self.attrs['data_layout'] = 'NCHW'
@@ -108,7 +128,13 @@ class TestCase1(TestBase):


class TestCase2(TestBase):
    def set_atol(self):
        self.atol = 1e-7
        self.rtol = 1e-6
        self.atol_fp16 = 1e-3
        self.rtol_fp16 = 1e-3

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['is_test'] = True
        self.attrs['data_layout'] = 'NCHW'
...
@@ -17,8 +17,7 @@ from __future__ import print_function

import numpy as np
import unittest
import paddle
import paddle.static

paddle.enable_static()
SEED = 2021
@@ -28,7 +27,7 @@ SEED = 2021
                 "core is not compiled with IPU")
class TestFunc(unittest.TestCase):
    def _test_func(self, run_ipu=True):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
@@ -40,22 +39,20 @@ class TestFunc(unittest.TestCase):
        c, h, w = 3, 10, 10
        np_image = np.random.uniform(size=[1 * bps, c, h, w]).astype(np.float32)

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                image = paddle.static.data(
                    name='image', shape=[n, c, h, w], dtype='float32')
                conv2d = paddle.static.nn.conv2d(
                    image, num_filters=3, filter_size=3, bias_attr=False)

                out = conv2d

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
@@ -63,14 +60,9 @@ class TestFunc(unittest.TestCase):
                feed_list = [image.name]
                fetch_list = [out.name]
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=False)
                ipu_strategy.set_pipelining_config(batches_per_step=bps)
                program = paddle.static.IpuCompiledProgram(
                    main_prog, ipu_strategy=ipu_strategy).compile(feed_list,
                                                                  fetch_list)
            else:
...
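
A note on the pipelining hunk above: with `set_pipelining_config(batches_per_step=bps)`, one `exe.run` call consumes `bps` mini-batches, which is why the host tensor is built with a leading `1 * bps` factor while the graph's `data` layer is declared with the per-step batch size `n`. A rough shape-bookkeeping sketch (the concrete `n` and `bps` values sit in the collapsed context above, so the ones here are assumed):

import numpy as np

bps, n = 3, 1          # assumed values; the real ones are collapsed above
c, h, w = 3, 10, 10
np_image = np.random.uniform(size=[n * bps, c, h, w]).astype(np.float32)
# one run feeds bps slices of shape [n, c, h, w], one per pipelined step
assert np_image.shape[0] == bps * n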
@@ -16,14 +16,8 @@ import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,14 +26,14 @@ class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_atol(self):
        self.atol = 1e-3

    def set_data_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
        }
@@ -47,23 +41,20 @@ class TestBase(IPUOpTest):
    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [x.dtype for x in self.feed.values()]

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'float16'

    def _test_base(self, run_ipu=True):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
@@ -82,8 +73,8 @@ class TestBase(IPUOpTest):
            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
@@ -103,27 +94,91 @@ class TestBase(IPUOpTest):
        self.assertTrue(res0.shape == res1.shape)


class TestCase2(TestBase):
    def set_atol(self):
        self.atol = 1e-10

    def set_data_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
        }

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'float32'


class TestCase3(TestBase):
    def set_atol(self):
        self.atol = 1e-10

    def set_data_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
        }

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'int32'


class TestCase4(TestBase):
    def set_atol(self):
        self.atol = 1e-10

    def set_data_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'),
        }

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'float32'


class TestCase5(TestBase):
    def set_atol(self):
        self.atol = 1e-10

    def set_data_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
        }

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'int32'


class TestCase6(TestBase):
    def set_atol(self):
        self.atol = 1e-10

    def set_data_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'),
        }

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'float16'


@unittest.skip('float64 is not supported')
class TestCase2(TestBase):
    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'float64'


@unittest.skip('skip float16 to float32')
class TestCase3(TestBase):
    def set_data_feed(self):
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
        }

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'float32'
@@ -133,13 +188,13 @@ class TestCase4(TestBase):
    def set_atol(self):
        self.atol = 1

    def set_data_feed(self):
        self.feed = {
            "x": np.random.randint(
                low=1, high=100, size=[1, 3, 3, 3]).astype('int32'),
        }

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['dtype'] = 'int8'
...
@@ -16,14 +16,8 @@ import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode,
                                                          IPUOpTest)


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,81 +27,95 @@ class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        return True

    def set_data_feed(self):
        data1 = np.random.uniform(size=[1, 3, 10, 10])
        data2 = np.random.uniform(size=[1, 3, 10, 10])

        self.feed_fp32 = {
            'x': data1.astype(np.float32),
            'y': data2.astype(np.float32)
        }
        self.feed_fp16 = {
            'x': data1.astype(np.float16),
            'y': data2.astype(np.float16)
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {"axis": 0}

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype='float32')

                out = paddle.fluid.layers.concat([x, y], **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)


class TestCase1(TestBase):
    def set_op_attrs(self):
        self.attrs = {"axis": 1}
...
@@ -16,13 +16,8 @@ import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -31,20 +26,30 @@ class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        return True

    def set_atol(self):
        self.atol = 1e-6
        self.rtol = 1e-6
        self.atol_fp16 = 1e-3
        self.rtol_fp16 = 1e-3

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {'in_0': data.astype(np.float32)}
        self.feed_fp16 = {'in_0': data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['num_filters'] = 3
        self.attrs['filter_size'] = 3
@@ -54,104 +59,112 @@ class TestBase(IPUOpTest):
        self.attrs['groups'] = 1
        self.attrs['data_format'] = 'NCHW'

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                image = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')

                out = paddle.fluid.layers.conv2d(image, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)


class TestCase1(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['num_filters'] = 1


class TestCase2(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['filter_size'] = [3, 3]


class TestCase2_1(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['filter_size'] = [3, 2]


class TestCase3(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['stride'] = [2, 3]


class TestCase4(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['dilation'] = [2, 2]


class TestCase5(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['groups'] = 3


class TestCase6(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['padding'] = 2


class TestCase7(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['padding'] = [2, 3]


class TestCase8(TestBase):
    def set_op_attrs(self):
        super().set_op_attrs()
        self.attrs['padding'] = [1, 2, 2, 3]
...
@@ -16,14 +16,8 @@ import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,44 +26,54 @@ class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        return True

    def set_data_feed(self):
        x = np.random.uniform(size=[3, 7])
        label = np.arange(3).reshape([3, 1])
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "label": label.astype(np.int64)
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "label": label.astype(np.int32)
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {'soft_label': False, }

    def np_nll_loss(self):
        tmp = -np.log(self.feed_fp32['x'])
        label = self.feed_fp32['label']
        indice = [range(label.shape[0]), label.flatten()]
        self.np_ref = tmp[indice]

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype="float32")
                if exec_mode != ExecutionMode.CPU_FP32:
                    label = paddle.static.data(
                        name=self.feed_list[1],
                        shape=self.feed_shape[1],
                        dtype='int32')
                else:
                    label = paddle.static.data(
                        name=self.feed_list[1],
                        shape=self.feed_shape[1],
                        dtype='int64')

                out = paddle.fluid.layers.cross_entropy(
                    input=x, label=label, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16
            if exec_mode != ExecutionMode.CPU_FP32:
                feed['label'] = feed['label'].astype(np.int32)

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.np_nll_loss()
        self.check(output_dict)


class TestCase1(TestBase):
    def set_op_attrs(self):
        self.attrs = {
            'soft_label': False,
            'ignore_index': 1,
        }


class TestCase2(TestBase):
    def set_data_feed(self):
        x = np.random.uniform(size=[30, 70])
        label = np.arange(30).reshape([30, 1])
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "label": label.astype(np.int64)
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "label": label.astype(np.int32)
        }


@unittest.skip("soft_label=True is not supported")
class TestCase3(TestBase):
    def set_op_attrs(self):
        self.attrs = {'soft_label': True, }
...
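
The `np_nll_loss` helper above builds a NumPy reference for `cross_entropy` with `soft_label=False`: the loss for row `i` is `-log(x[i, label[i]])`. A small worked sketch of that indexing (illustrative values, not from the test):

import numpy as np

x = np.array([[0.2, 0.8], [0.5, 0.5]], dtype=np.float32)
label = np.array([[1], [0]])
ref = -np.log(x)[range(label.shape[0]), label.flatten()]
# ref == [-log(0.8), -log(0.5)] ~= [0.2231, 0.6931]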
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    # PopART does not support fp16 cumsum
    @property
    def fp16_enabled(self):
        return False

    def set_data_feed(self):
        x = np.random.uniform(size=[1, 128])
        self.feed_fp32 = {"x": x.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())
        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

    def set_op_attrs(self):
        self.attrs = {}

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype="float32")

                out = paddle.fluid.layers.cumsum(x, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)


class TestCase1(TestBase):
    def set_op_attrs(self):
        self.attrs = {"exclusive": True, "reverse": False}


class TestCase2(TestBase):
    def set_op_attrs(self):
        self.attrs = {"exclusive": False, "reverse": True}


class TestCase3(TestBase):
    def set_op_attrs(self):
        self.attrs = {"exclusive": True, "reverse": True}


if __name__ == "__main__":
    unittest.main()
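
For reference, the `exclusive` and `reverse` attributes exercised above follow the usual cumulative-sum conventions; a NumPy sketch of what each case computes (my own illustration, not from the diff; TestCase3 composes both behaviors):

import numpy as np

x = np.array([1., 2., 3.])
print(np.cumsum(x))                               # [1. 3. 6.]  default
print(np.concatenate([[0.], np.cumsum(x)[:-1]]))  # [0. 1. 3.]  exclusive=True
print(np.cumsum(x[::-1])[::-1])                   # [6. 5. 3.]  reverse=True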