未验证 提交 63d4d05a 编写于 作者: A Allen Guo 提交者: GitHub

[IPU] update UTs 0 (#42516)

* update UTs 0

* fix ci

* fix ci 3
上级 1b5647d7
...@@ -15,9 +15,10 @@ ...@@ -15,9 +15,10 @@
import os import os
import random import random
import unittest import unittest
import numpy as np
from enum import IntEnum from enum import IntEnum
from typing import Dict, List, Optional
import numpy as np
import paddle import paddle
import paddle.static import paddle.static
...@@ -33,31 +34,27 @@ map_np_dtype_to_fluid_dtype = { ...@@ -33,31 +34,27 @@ map_np_dtype_to_fluid_dtype = {
} }
def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
    """Return the fluid dtype string that corresponds to a NumPy dtype."""
    dtype_name = dtype.name
    return map_np_dtype_to_fluid_dtype[dtype_name]
class ExecutionModeFull(IntEnum): class ExecutionModeFull(IntEnum):
# Run fp32 model on cpu # Run fp32 model on cpu
CPU_FP32 = 1 CPU_FP32 = 1
# Run fp32 model on ipu # Run fp32 model on ipu
IPU_FP32 = 2 IPU_FP32 = 2
# Convert model to fp16 using popart transform # Convert model to fp16 using mixed-precision approch
# All parameters will be converted to fp16 # All parameters will be converted to fp16
# TODO rename to IPU_FP16 IPU_FP16 = 3
IPU_POPART_FP16 = 3
# Mix-precision mode, using `paddle.static.amp.fp16_guard()` to control the
# precision of each operator
IPU_MIXED_PRECISION = 4
class ExecutionMode(IntEnum): class ExecutionMode(IntEnum):
CPU_FP32 = ExecutionModeFull.CPU_FP32 CPU_FP32 = ExecutionModeFull.CPU_FP32
IPU_FP32 = ExecutionModeFull.IPU_FP32 IPU_FP32 = ExecutionModeFull.IPU_FP32
IPU_POPART_FP16 = ExecutionModeFull.IPU_POPART_FP16 IPU_FP16 = ExecutionModeFull.IPU_FP16
def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
    """Map a NumPy dtype to the matching fluid dtype name string."""
    return map_np_dtype_to_fluid_dtype[dtype.name]
class IPUTest(unittest.TestCase):
class IPUOpTest(unittest.TestCase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
# Get random seeds # Get random seeds
...@@ -67,12 +64,7 @@ class IPUOpTest(unittest.TestCase): ...@@ -67,12 +64,7 @@ class IPUOpTest(unittest.TestCase):
cls.SEED = 2021 cls.SEED = 2021
np.random.seed(cls.SEED) np.random.seed(cls.SEED)
random.seed(cls.SEED) random.seed(cls.SEED)
paddle.seed(cls.SEED)
# For ipu, most ops support fp16
cls.amp_list = paddle.static.amp.CustomOpLists(
custom_black_list=[], custom_white_list=[])
cls.amp_list.unsupported_list = {}
cls.amp_list.black_list = {}
# Enable paddle static graph mode # Enable paddle static graph mode
paddle.enable_static() paddle.enable_static()
...@@ -83,6 +75,7 @@ class IPUOpTest(unittest.TestCase): ...@@ -83,6 +75,7 @@ class IPUOpTest(unittest.TestCase):
np.random.set_state(cls._np_rand_state) np.random.set_state(cls._np_rand_state)
random.setstate(cls._py_rand_state) random.setstate(cls._py_rand_state)
# Check if ipumodel mode is enabled
@classmethod @classmethod
def use_ipumodel(cls): def use_ipumodel(cls):
if 'POPLAR_IPUMODEL' not in os.environ: if 'POPLAR_IPUMODEL' not in os.environ:
...@@ -92,6 +85,69 @@ class IPUOpTest(unittest.TestCase): ...@@ -92,6 +85,69 @@ class IPUOpTest(unittest.TestCase):
if flag.upper() in ['1', "TRUE"]: if flag.upper() in ['1', "TRUE"]:
return True return True
# Decorator for static graph building
def static_graph(builder):
def wrapper(self, *args, **kwargs):
self.scope = paddle.static.Scope()
self.main_prog = paddle.static.Program()
self.startup_prog = paddle.static.Program()
self.main_prog.random_seed = self.SEED
self.startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(self.scope):
with paddle.utils.unique_name.guard(
paddle.utils.unique_name.generate('')):
with paddle.static.program_guard(self.main_prog,
self.startup_prog):
builder(self, *args, **kwargs)
return wrapper
# Cast a fp32 model to a full-fp16 model
@classmethod
def cast_model_to_fp16(cls, main_program):
amp_list = paddle.static.amp.CustomOpLists()
amp_list.unsupported_list = {}
to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
main_program, amp_list, use_fp16_guard=False)
paddle.static.amp.cast_parameters_to_fp16(
paddle.CPUPlace(),
main_program,
to_fp16_var_names=to_fp16_var_names)
class IPUOpTest(IPUTest):
    @classmethod
    def setUpClass(cls):
        """Initialize shared op-test state on top of IPUTest's setup."""
        super().setUpClass()

        # Items that an op tester needs; these are populated later
        # (the programs/scope by a @static_graph-decorated builder).
        cls.main_prog: Optional[paddle.static.Program] = None
        cls.startup_prog: Optional[paddle.static.Program] = None
        cls.scope: Optional[paddle.static.Scope] = None
        cls.feed_list: Optional[List[str]] = None
        cls.fetch_list: Optional[List[str]] = None
        cls.output_dict: Optional[Dict] = {}
    @property
    def fp16_enabled(self):
        """Whether the fp16 execution mode is tested; subclasses may override."""
        return True
def skip_mode(self, exec_mode):
if exec_mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
return True
else:
return False
def is_ipu_mode(self, exec_mode):
if exec_mode == ExecutionMode.CPU_FP32:
return False
return True
def is_fp16_mode(self, exec_mode):
if exec_mode != ExecutionMode.IPU_FP16:
return False
return True
def set_atol(self): def set_atol(self):
self.atol = 1e-10 self.atol = 1e-10
self.rtol = 1e-6 self.rtol = 1e-6
...@@ -102,55 +158,90 @@ class IPUOpTest(unittest.TestCase): ...@@ -102,55 +158,90 @@ class IPUOpTest(unittest.TestCase):
self.is_training = False self.is_training = False
self.epoch = 1 self.epoch = 1
def check(self, outputs, check_shape=False): def run_op_test(self, exec_mode, ipu_strategy=None):
cpu_fp32 = outputs[ExecutionMode.CPU_FP32] # NOTE: some op has no inputs
ipu_fp32 = outputs[ExecutionMode.IPU_FP32] # if len(self.feed_list) == 0 or len(self.fetch_list) == 0:
max_diff = np.abs(cpu_fp32 - ipu_fp32).max() # raise ValueError('feed_list or fetch_list is empty')
fp32_flag = np.allclose( if self.is_ipu_mode(exec_mode):
cpu_fp32, ipu_fp32, rtol=self.rtol, atol=self.atol) place = paddle.IPUPlace()
self.assertTrue(fp32_flag, "max diff is %f" % (max_diff)) else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(self.startup_prog)
if self.is_ipu_mode(exec_mode):
if ipu_strategy is None:
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if self.is_fp16_mode(exec_mode):
ipu_strategy.set_precision_config(enable_fp16=True)
IPUOpTest.cast_model_to_fp16(self.main_prog)
program = paddle.static.IpuCompiledProgram(
self.main_prog, ipu_strategy=ipu_strategy).compile(
self.feed_list, self.fetch_list)
else:
program = self.main_prog
feed = self.feed_fp32
if self.is_fp16_mode(exec_mode):
feed = self.feed_fp16
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=self.fetch_list)
result.append(loss_res)
else:
result = exe.run(program, feed=feed, fetch_list=self.fetch_list)
if isinstance(result, list) and len(result) == 1:
self.output_dict[exec_mode] = result[0]
else:
self.output_dict[exec_mode] = result
def check(self, check_shape=False, output_dict=None):
if output_dict is None:
output_dict = self.output_dict
if len(output_dict) == 0:
raise ValueError("output_dict is empty")
cpu_fp32 = output_dict[ExecutionMode.CPU_FP32]
ipu_fp32 = output_dict[ExecutionMode.IPU_FP32]
cpu_fp32 = np.asarray(cpu_fp32).astype(np.float32).flatten()
ipu_fp32 = np.asarray(ipu_fp32).astype(np.float32).flatten()
pass_check = np.allclose(
ipu_fp32, cpu_fp32, rtol=self.rtol, atol=self.atol)
if not pass_check:
max_atol = np.abs(ipu_fp32 - cpu_fp32).max()
cpu_fp32_abs = np.abs(cpu_fp32)
cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
max_rtol = (np.abs(ipu_fp32 - cpu_fp32) / cpu_fp32_abs).max()
raise AssertionError(
f"ipu_fp32 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
)
if check_shape: if check_shape:
self.assertTrue(cpu_fp32.shape == ipu_fp32.shape) self.assertTrue(cpu_fp32.shape == ipu_fp32.shape)
ipu_popart_fp16 = None if ExecutionMode.IPU_FP16 in output_dict.keys():
if ExecutionMode.IPU_POPART_FP16 in outputs.keys(): ipu_fp16 = output_dict[ExecutionMode.IPU_FP16]
ipu_popart_fp16 = outputs[ExecutionMode.IPU_POPART_FP16] ipu_fp16 = np.asarray(ipu_fp16).astype(np.float32).flatten()
max_diff = np.abs(ipu_popart_fp16.astype(np.float32) - pass_check = np.allclose(
cpu_fp32).max() ipu_fp16, cpu_fp32, rtol=self.rtol_fp16, atol=self.atol_fp16)
fp16_flag = np.allclose( if not pass_check:
ipu_popart_fp16.astype(np.float32), max_atol = np.abs(ipu_fp16 - cpu_fp32).max()
cpu_fp32, cpu_fp32_abs = np.abs(cpu_fp32)
rtol=self.rtol_fp16, cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
atol=self.atol_fp16) max_rtol = (np.abs(ipu_fp16 - cpu_fp32) / cpu_fp32_abs).max()
self.assertTrue(fp16_flag, "max diff is %f" % (max_diff)) raise AssertionError(
f"ipu_fp16 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
)
if check_shape: if check_shape:
self.assertTrue(ipu_popart_fp16.shape == cpu_fp32.shape) self.assertTrue(ipu_fp16.shape == cpu_fp32.shape)
ipu_mixed_precision = None # Execution Mode
if ExecutionModeFull.IPU_MIXED_PRECISION in outputs.keys(): class ExecutionMode(IntEnum):
ipu_mixed_precision = outputs[ CPU_FP32 = ExecutionModeFull.CPU_FP32
ExecutionModeFull.IPU_MIXED_PRECISION] IPU_FP32 = ExecutionModeFull.IPU_FP32
max_diff = np.abs( IPU_FP16 = ExecutionModeFull.IPU_FP16
ipu_mixed_precision.astype(np.float32) - cpu_fp32).max()
fp16_flag = np.allclose(
ipu_mixed_precision.astype(np.float32),
cpu_fp32,
rtol=self.rtol_fp16,
atol=self.atol_fp16)
self.assertTrue(fp16_flag, "max diff is %f" % (max_diff))
if check_shape:
self.assertTrue(ipu_mixed_precision.shape == cpu_fp32.shape)
if ExecutionMode.IPU_POPART_FP16 in outputs.keys(
) and ExecutionModeFull.IPU_MIXED_PRECISION in outputs.keys():
max_diff = np.abs(ipu_popart_fp16 - ipu_mixed_precision).max()
self.assertEqual(ipu_popart_fp16.all(),
ipu_mixed_precision.all(),
"max diff is %f" % (max_diff))
if check_shape:
self.assertTrue(
ipu_popart_fp16.shape == ipu_mixed_precision.shape)
...@@ -18,8 +18,7 @@ import numpy as np ...@@ -18,8 +18,7 @@ import numpy as np
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
IPUOpTest)
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,10 +31,6 @@ class TestRelu(IPUOpTest): ...@@ -32,10 +31,6 @@ class TestRelu(IPUOpTest):
self.set_data_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
@property
def fp16_enabled(self):
return True
def set_test_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.relu self.op = paddle.fluid.layers.relu
self.op_attrs = {} self.op_attrs = {}
...@@ -49,60 +44,22 @@ class TestRelu(IPUOpTest): ...@@ -49,60 +44,22 @@ class TestRelu(IPUOpTest):
self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys()) self.feed_list = list(self.feed_fp32.keys())
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED out = self.op(x, **self.op_attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
out = self.op(x, **self.op_attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) def run_model(self, exec_mode):
exe.run(startup_prog) self.run_op_test(exec_mode)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten() self.check()
self.check(output_dict)
class TestTanh(TestRelu): class TestTanh(TestRelu):
......
...@@ -17,8 +17,7 @@ import unittest ...@@ -17,8 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
IPUOpTest)
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -31,12 +30,8 @@ class TestBase(IPUOpTest): ...@@ -31,12 +30,8 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[10, 1000]) data = np.random.uniform(size=[10, 500]).astype(np.float16)
self.feed_fp32 = {"in_0": data.astype(np.float32)} self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)} self.feed_fp16 = {"in_0": data.astype(np.float16)}
...@@ -48,64 +43,24 @@ class TestBase(IPUOpTest): ...@@ -48,64 +43,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"axis": -1} self.attrs = {"axis": -1}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED out = paddle.fluid.layers.argmax(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope): def run_model(self, exec_mode):
with paddle.static.program_guard(main_prog, startup_prog): self.run_op_test(exec_mode)
x = paddle.static.data(
name=self.feed_list[0], def test(self):
shape=self.feed_shape[0], for m in IPUOpTest.ExecutionMode:
dtype='float32') if not self.skip_mode(m):
self.build_model()
out = paddle.fluid.layers.argmax(x, **self.attrs) self.run_model(m)
for k, v in self.output_dict.items():
fetch_list = [out.name] self.output_dict[k] = v.astype(np.int32)
self.check()
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0].astype(np.int32)
def test_base(self):
output_dict_fp32 = {}
output_dict_fp16 = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
if mode > ExecutionMode.IPU_FP32:
output_dict_fp16[mode] = self._test_base(mode).flatten()
else:
output_dict_fp32[mode] = self._test_base(mode).flatten()
self.check(output_dict_fp32)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -29,10 +29,6 @@ class TestBase(IPUOpTest): ...@@ -29,10 +29,6 @@ class TestBase(IPUOpTest):
self.set_data_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[2, 3, 1]) data = np.random.uniform(size=[2, 3, 1])
self.feed_fp32 = {'in_0': data.astype(np.float32)} self.feed_fp32 = {'in_0': data.astype(np.float32)}
...@@ -42,60 +38,23 @@ class TestBase(IPUOpTest): ...@@ -42,60 +38,23 @@ class TestBase(IPUOpTest):
self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys()) self.feed_list = list(self.feed_fp32.keys())
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED x = paddle.assign(x)
startup_prog.random_seed = self.SEED out = paddle.fluid.layers.elementwise_add(x, x)
self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
assign = paddle.assign(x)
out = paddle.fluid.layers.elementwise_add(assign, assign)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestAssignFp32Value(TestBase): class TestAssignFp32Value(TestBase):
...@@ -107,51 +66,13 @@ class TestAssignFp32Value(TestBase): ...@@ -107,51 +66,13 @@ class TestAssignFp32Value(TestBase):
data = np.random.uniform(size=[2, 3, 1]) data = np.random.uniform(size=[2, 3, 1])
self.assign_fp32 = data.astype(np.float32) self.assign_fp32 = data.astype(np.float32)
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED assign = paddle.assign(self.assign_fp32)
startup_prog.random_seed = self.SEED out = paddle.fluid.layers.elementwise_add(x, assign)
self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
assign = paddle.assign(self.assign_fp32)
out = paddle.fluid.layers.elementwise_add(x, assign)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
class TestAssignBoolValue(TestBase): class TestAssignBoolValue(TestBase):
...@@ -162,52 +83,15 @@ class TestAssignBoolValue(TestBase): ...@@ -162,52 +83,15 @@ class TestAssignBoolValue(TestBase):
data = np.random.choice([True, False], size=(2, 3, 1)) data = np.random.choice([True, False], size=(2, 3, 1))
self.assign_bool = data.astype(np.bool) self.assign_bool = data.astype(np.bool)
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED x = paddle.less_than(x, x)
startup_prog.random_seed = self.SEED assign = paddle.assign(self.assign_bool)
x = paddle.logical_and(x, assign)
with paddle.static.scope_guard(scope): out = paddle.cast(x, 'float32')
with paddle.static.program_guard(main_prog, startup_prog): self.fetch_list = [out.name]
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
x = paddle.less_than(x, x)
assign = paddle.assign(self.assign_bool)
out = paddle.logical_and(x, assign)
out = paddle.cast(out, 'float32')
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -29,10 +29,6 @@ class TestBase(IPUOpTest): ...@@ -29,10 +29,6 @@ class TestBase(IPUOpTest):
self.set_data_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
@property
def fp16_enabled(self):
return True
def set_atol(self): def set_atol(self):
self.atol = 2e-6 self.atol = 2e-6
self.rtol = 1e-5 self.rtol = 1e-5
...@@ -48,67 +44,32 @@ class TestBase(IPUOpTest): ...@@ -48,67 +44,32 @@ class TestBase(IPUOpTest):
self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys()) self.feed_list = list(self.feed_fp32.keys())
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED x = paddle.static.nn.conv2d(
startup_prog.random_seed = self.SEED x, num_filters=3, filter_size=3, bias_attr=False)
x = paddle.static.nn.conv2d(
with paddle.static.scope_guard(scope): x, num_filters=3, filter_size=3, bias_attr=False)
with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.nn.conv2d(
x = paddle.static.data( x, num_filters=3, filter_size=3, bias_attr=False)
name=self.feed_list[0], x = paddle.static.nn.conv2d(
shape=self.feed_shape[0], x, num_filters=3, filter_size=3, bias_attr=False)
dtype='float32') self.fetch_list = [x.name]
x = paddle.static.nn.conv2d( def run_model(self, exec_mode):
x, num_filters=3, filter_size=3, bias_attr=False) ipu_strategy = paddle.static.IpuStrategy()
x = paddle.static.nn.conv2d( ipu_strategy.set_graph_config(is_training=self.is_training)
x, num_filters=3, filter_size=3, bias_attr=False) ipu_strategy.set_options({'need_avg_shard': True})
x = paddle.static.nn.conv2d( self.run_op_test(exec_mode, ipu_strategy)
x, num_filters=3, filter_size=3, bias_attr=False)
x = paddle.static.nn.conv2d(
x, num_filters=3, filter_size=3, bias_attr=False)
fetch_list = [x.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
ipu_strategy.set_options({'need_avg_shard': True})
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten() self.check()
self.check(output_dict)
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,8 +17,7 @@ import unittest ...@@ -17,8 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
IPUOpTest)
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -31,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -31,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self): def set_atol(self):
self.atol = 1e-6 self.atol = 1e-6
self.rtol = 1e-5 self.rtol = 1e-5
...@@ -56,61 +51,24 @@ class TestBase(IPUOpTest): ...@@ -56,61 +51,24 @@ class TestBase(IPUOpTest):
self.attrs['data_layout'] = 'NCHW' self.attrs['data_layout'] = 'NCHW'
self.attrs['in_place'] = False self.attrs['in_place'] = False
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED x = paddle.static.nn.conv2d(
startup_prog.random_seed = self.SEED x, num_filters=3, filter_size=3, bias_attr=False)
x = paddle.fluid.layers.batch_norm(x, **self.attrs)
with paddle.static.scope_guard(scope): self.fetch_list = [x.name]
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
conv1 = paddle.static.nn.conv2d(
x, num_filters=3, filter_size=3, bias_attr=False)
out = paddle.fluid.layers.batch_norm(conv1, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase): class TestCase1(TestBase):
......
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import paddle
import paddle.static
# All tests in this file build and run static graphs.
paddle.enable_static()
# Fixed RNG seed shared by the CPU and IPU runs so results are comparable.
SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestFunc(unittest.TestCase):
    """Check that a pipelined IPU conv2d program matches the CPU result."""

    def _test_func(self, run_ipu=True):
        """Build and run a single-conv2d graph; return the fetched output.

        When ``run_ipu`` is True the program is compiled with
        ``batches_per_step=5`` pipelining and executed on an IPUPlace;
        otherwise the raw program runs on a CPUPlace.
        """
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)

        # batches per step for IPU pipelining
        bps = 5
        # IPU feeds one batch per pipeline step (n=1); the CPU program uses
        # a dynamic batch dim (-1) and consumes all bps batches at once.
        n = 1 if run_ipu else -1
        c, h, w = 3, 10, 10
        np_image = np.random.uniform(size=[1 * bps, c, h, w]).astype(np.float32)

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                image = paddle.static.data(
                    name='image', shape=[n, c, h, w], dtype='float32')
                conv2d = paddle.static.nn.conv2d(
                    image, num_filters=3, filter_size=3, bias_attr=False)
                out = conv2d

        if run_ipu:
            place = paddle.IPUPlace()
        else:
            place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        exe.run(startup_prog)

        if run_ipu:
            feed_list = [image.name]
            fetch_list = [out.name]
            ipu_strategy = paddle.static.IpuStrategy()
            ipu_strategy.set_graph_config(is_training=False)
            ipu_strategy.set_pipelining_config(batches_per_step=bps)
            program = paddle.static.IpuCompiledProgram(
                main_prog, ipu_strategy=ipu_strategy).compile(feed_list,
                                                              fetch_list)
        else:
            program = main_prog

        result = exe.run(program,
                         feed={image.name: np_image},
                         fetch_list=[out])
        return result[0]

    def test_func(self):
        ipu_res = self._test_func(True)
        cpu_res = self._test_func(False)
        # The pipelined run may return a differently shaped array; reshape
        # before comparing when the element counts agree.
        if np.prod(ipu_res.shape) == np.prod(cpu_res.shape):
            ipu_res = ipu_res.reshape(cpu_res.shape)
        self.assertTrue(np.allclose(ipu_res, cpu_res, atol=1e-4))
# Allow running this file directly as a test script.
if __name__ == "__main__":
    unittest.main()
...@@ -30,175 +30,81 @@ class TestBase(IPUOpTest): ...@@ -30,175 +30,81 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
def set_atol(self): @property
self.atol = 1e-3 def fp16_enabled(self):
return False
def set_data_feed(self): def set_data_feed(self):
self.feed = { data = np.random.uniform(size=[1, 3, 3, 3])
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'), self.feed_fp32 = {'x': data.astype(np.float32)}
}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()] self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
self.attrs['dtype'] = 'float16' self.attrs['dtype'] = 'float16'
def _test_base(self, run_ipu=True): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0],
main_prog.random_seed = self.SEED shape=self.feed_shape[0],
startup_prog.random_seed = self.SEED dtype=self.feed_dtype[0])
out = paddle.cast(x, **self.attrs)
with paddle.static.scope_guard(scope): self.fetch_list = [out.name]
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( def run_model(self, exec_mode):
name=self.feed_list[0], self.run_op_test(exec_mode)
shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) def test(self):
out = paddle.cast(x, **self.attrs) for m in IPUOpTest.ExecutionMode:
fetch_list = [out.name] if not self.skip_mode(m):
self.build_model()
if run_ipu: self.run_model(m)
place = paddle.IPUPlace() self.check()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(True)
res1 = self._test_base(False)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
class TestEnableFp16(TestBase): class TestEnableFp16(TestBase):
def set_atol(self): @property
self.atol = 1e-10 def fp16_enabled(self):
return True
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def set_data_feed(self): def set_data_feed(self):
self.feed = {"x": np.array([1, 200, 3000, 40000]).astype('int32'), } data = np.random.uniform(size=[1, 3, 3, 3])
self.feed_fp32 = {'x': data.astype(np.float32)}
self.feed_fp16 = {'x': data.astype(np.float16)}
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
self.attrs['dtype'] = 'float32' self.attrs['dtype'] = 'float32'
def _test_base(self, run_ipu=True):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
out = paddle.cast(x, **self.attrs)
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
class TestDisableTransferCast(TestEnableFp16): class TestDisableTransferCast(TestEnableFp16):
def set_atol(self):
self.atol = 1e-10
def set_data_feed(self): def set_data_feed(self):
self.feed = {"x": np.array([1, 200, 3000, 40000]).astype('int32'), } data = np.random.uniform(size=[1, 3, 3, 3])
self.feed_fp32 = {'x': data.astype(np.float32)}
self.feed_fp16 = {'x': data.astype(np.float16)}
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
self.attrs['dtype'] = 'float32' self.attrs['dtype'] = 'float32'
def _test_base(self, run_ipu=True): def run_model(self, exec_mode):
scope = paddle.static.Scope() ipu_strategy = paddle.static.IpuStrategy()
main_prog = paddle.static.Program() ipu_strategy.set_graph_config(is_training=self.is_training)
startup_prog = paddle.static.Program() ipu_strategy.set_options({"transfer_cast_op": False})
main_prog.random_seed = self.SEED self.run_op_test(exec_mode)
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
out = paddle.cast(x, **self.attrs)
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
ipu_strategy.set_precision_config(enable_fp16=True)
ipu_strategy.set_options({"transfer_cast_op": False})
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
class TestCase2(TestBase): class TestCase2(TestBase):
def set_atol(self):
self.atol = 1e-10
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed_fp32 = {
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'), "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
} }
...@@ -208,11 +114,8 @@ class TestCase2(TestBase): ...@@ -208,11 +114,8 @@ class TestCase2(TestBase):
class TestCase3(TestBase): class TestCase3(TestBase):
def set_atol(self):
self.atol = 1e-10
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed_fp32 = {
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'), "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
} }
...@@ -222,11 +125,8 @@ class TestCase3(TestBase): ...@@ -222,11 +125,8 @@ class TestCase3(TestBase):
class TestCase4(TestBase): class TestCase4(TestBase):
def set_atol(self):
self.atol = 1e-10
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed_fp32 = {
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'), "x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'),
} }
...@@ -236,11 +136,8 @@ class TestCase4(TestBase): ...@@ -236,11 +136,8 @@ class TestCase4(TestBase):
class TestCase5(TestBase): class TestCase5(TestBase):
def set_atol(self):
self.atol = 1e-10
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed_fp32 = {
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'), "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
} }
...@@ -250,11 +147,8 @@ class TestCase5(TestBase): ...@@ -250,11 +147,8 @@ class TestCase5(TestBase):
class TestCase6(TestBase): class TestCase6(TestBase):
def set_atol(self):
self.atol = 1e-10
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed_fp32 = {
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'), "x": np.random.uniform(size=[1, 3, 3, 3]).astype('int32'),
} }
...@@ -273,7 +167,7 @@ class TestCase2(TestBase): ...@@ -273,7 +167,7 @@ class TestCase2(TestBase):
@unittest.skip('skip float16 to float32') @unittest.skip('skip float16 to float32')
class TestCase3(TestBase): class TestCase3(TestBase):
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed_fp32 = {
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'), "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
} }
...@@ -285,10 +179,11 @@ class TestCase3(TestBase): ...@@ -285,10 +179,11 @@ class TestCase3(TestBase):
@unittest.skip('int32 to int8 is not supported') @unittest.skip('int32 to int8 is not supported')
class TestCase4(TestBase): class TestCase4(TestBase):
def set_atol(self): def set_atol(self):
super().set_atol()
self.atol = 1 self.atol = 1
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed_fp32 = {
"x": np.random.randint( "x": np.random.randint(
low=1, high=100, size=[1, 3, 3, 3]).astype('int32'), low=1, high=100, size=[1, 3, 3, 3]).astype('int32'),
} }
......
...@@ -17,8 +17,7 @@ import unittest ...@@ -17,8 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
IPUOpTest)
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -31,14 +30,9 @@ class TestBase(IPUOpTest): ...@@ -31,14 +30,9 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data1 = np.random.uniform(size=[1, 3, 10, 10]) data1 = np.random.uniform(size=[1, 3, 10, 10])
data2 = np.random.uniform(size=[1, 3, 10, 10]) data2 = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = { self.feed_fp32 = {
'x': data1.astype(np.float32), 'x': data1.astype(np.float32),
'y': data2.astype(np.float32) 'y': data2.astype(np.float32)
...@@ -55,63 +49,24 @@ class TestBase(IPUOpTest): ...@@ -55,63 +49,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"axis": 0} self.attrs = {"axis": 0}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED y = paddle.static.data(
startup_prog.random_seed = self.SEED name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.concat([x, y], **self.attrs)
with paddle.static.scope_guard(scope): self.fetch_list = [out.name]
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( def run_model(self, exec_mode):
name=self.feed_list[0], self.run_op_test(exec_mode)
shape=self.feed_shape[0],
dtype='float32') def test(self):
y = paddle.static.data( for m in IPUOpTest.ExecutionMode:
name=self.feed_list[1], if not self.skip_mode(m):
shape=self.feed_shape[1], self.build_model()
dtype='float32') self.run_model(m)
self.check()
out = paddle.fluid.layers.concat([x, y], **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -26,26 +26,19 @@ class TestBase(IPUOpTest): ...@@ -26,26 +26,19 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_data_feed() self.set_feed()
self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self): def set_atol(self):
self.atol = 1e-6 self.atol = 1e-6
self.rtol = 1e-6 self.rtol = 1e-6
self.atol_fp16 = 1e-3 self.atol_fp16 = 1e-3
self.rtol_fp16 = 1e-3 self.rtol_fp16 = 1e-3
def set_data_feed(self): def set_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10]) data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)} self.feed_fp32 = {'in_0': data.astype(np.float32)}
self.feed_fp16 = {'in_0': data.astype(np.float16)} self.feed_fp16 = {'in_0': data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys()) self.feed_list = list(self.feed_fp32.keys())
...@@ -59,59 +52,22 @@ class TestBase(IPUOpTest): ...@@ -59,59 +52,22 @@ class TestBase(IPUOpTest):
self.attrs['groups'] = 1 self.attrs['groups'] = 1
self.attrs['data_format'] = 'NCHW' self.attrs['data_format'] = 'NCHW'
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED x = paddle.fluid.layers.conv2d(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [x.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
out = paddle.fluid.layers.conv2d(image, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
x = np.random.uniform(size=[3, 7]) x = np.random.uniform(size=[3, 7])
label = np.arange(3).reshape([3, 1]) label = np.arange(3).reshape([3, 1])
...@@ -53,81 +49,31 @@ class TestBase(IPUOpTest): ...@@ -53,81 +49,31 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {'soft_label': False, } self.attrs = {'soft_label': False, }
def np_nll_loss(self): @IPUOpTest.static_graph
tmp = -np.log(self.feed_fp32['x']) def build_model(self, on_ipu):
label = self.feed_fp32['label'] x = paddle.static.data(
indice = [range(label.shape[0]), label.flatten()] name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32")
self.np_ref = tmp[indice] if on_ipu:
label = paddle.static.data(
def _test_base(self, exec_mode): name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
scope = paddle.static.Scope() else:
main_prog = paddle.static.Program() label = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[1], shape=self.feed_shape[1], dtype='int64')
main_prog.random_seed = self.SEED out = paddle.fluid.layers.cross_entropy(
startup_prog.random_seed = self.SEED input=x, label=label, **self.attrs)
self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): def run_model(self, exec_mode):
x = paddle.static.data( if self.is_ipu_mode(exec_mode):
name=self.feed_list[0], self.feed_fp32['label'] = self.feed_fp32['label'].astype(np.int32)
shape=self.feed_shape[0], self.run_op_test(exec_mode)
dtype="float32")
if exec_mode != ExecutionMode.CPU_FP32:
label = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='int32')
else:
label = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='int64')
out = paddle.fluid.layers.cross_entropy(
input=x, label=label, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
if exec_mode != ExecutionMode.CPU_FP32:
feed['label'] = feed['label'].astype(np.int32)
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model(self.is_ipu_mode(m))
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten() self.check()
self.np_nll_loss()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
...@@ -142,7 +88,6 @@ class TestCase2(TestBase): ...@@ -142,7 +88,6 @@ class TestCase2(TestBase):
def set_data_feed(self): def set_data_feed(self):
x = np.random.uniform(size=[30, 70]) x = np.random.uniform(size=[30, 70])
label = np.arange(30).reshape([30, 1]) label = np.arange(30).reshape([30, 1])
self.feed_fp32 = { self.feed_fp32 = {
"x": x.astype(np.float32), "x": x.astype(np.float32),
"label": label.astype(np.int64) "label": label.astype(np.int64)
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -48,60 +48,22 @@ class TestBase(IPUOpTest): ...@@ -48,60 +48,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32")
main_prog.random_seed = self.SEED out = paddle.fluid.layers.cumsum(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype="float32")
out = paddle.fluid.layers.cumsum(x, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,8 +17,7 @@ import unittest ...@@ -17,8 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
IPUOpTest)
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -31,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -31,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10]) data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'x': data.astype(np.float32)} self.feed_fp32 = {'x': data.astype(np.float32)}
...@@ -51,60 +46,23 @@ class TestBase(IPUOpTest): ...@@ -51,60 +46,23 @@ class TestBase(IPUOpTest):
"dropout_implementation": "downgrade_in_infer" "dropout_implementation": "downgrade_in_infer"
} }
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED x = paddle.fluid.layers.dropout(x, **self.attrs)
startup_prog.random_seed = self.SEED out = paddle.fluid.layers.elementwise_add(x, x)
self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
dropout = paddle.fluid.layers.dropout(x, **self.attrs)
out = paddle.fluid.layers.elementwise_add(dropout, dropout)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,8 +17,7 @@ import unittest ...@@ -17,8 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
IPUOpTest)
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -43,63 +42,24 @@ class TestMul(IPUOpTest): ...@@ -43,63 +42,24 @@ class TestMul(IPUOpTest):
self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys()) self.feed_list = list(self.feed_fp32.keys())
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED y = paddle.static.data(
startup_prog.random_seed = self.SEED name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = self.op(x, y, **self.attrs)
with paddle.static.scope_guard(scope): self.fetch_list = [out.name]
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = self.op(x, y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def run_test_base(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict) def run_test_base(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
def test_case0(self): def test_case0(self):
data_x = np.random.uniform(size=(2, 3, 4, 5)) data_x = np.random.uniform(size=(2, 3, 4, 5))
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
x = np.ones([1, 10]) x = np.ones([1, 10])
y = np.zeros([1, 10]) y = np.zeros([1, 10])
...@@ -53,63 +49,24 @@ class TestBase(IPUOpTest): ...@@ -53,63 +49,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED y = paddle.static.data(
startup_prog.random_seed = self.SEED name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.equal(x, y, **self.attrs)
with paddle.static.scope_guard(scope): self.fetch_list = [out.name]
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = paddle.fluid.layers.equal(x, y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten().astype(np.int32)
self.check(output_dict) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[2, 3, 1]) data = np.random.uniform(size=[2, 3, 1])
self.feed_fp32 = {'in_0': data.astype(np.float32)} self.feed_fp32 = {'in_0': data.astype(np.float32)}
...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest): ...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"expand_times": [1, 2, 2]} self.attrs = {"expand_times": [1, 2, 2]}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32")
main_prog.random_seed = self.SEED out = paddle.fluid.layers.expand(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype="float32")
out = paddle.fluid.layers.expand(x, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase): class TestCase1(TestBase):
...@@ -116,53 +75,15 @@ class TestCase1(TestBase): ...@@ -116,53 +75,15 @@ class TestCase1(TestBase):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32")
main_prog.random_seed = self.SEED expand_times = paddle.fluid.layers.fill_constant(
startup_prog.random_seed = self.SEED shape=[len(self.feed_shape[0])], dtype="int32", value=2)
out = paddle.fluid.layers.expand(
with paddle.static.scope_guard(scope): x, expand_times=expand_times, **self.attrs)
with paddle.static.program_guard(main_prog, startup_prog): self.fetch_list = [out.name]
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype="float32")
expand_times = paddle.fluid.layers.fill_constant(
shape=[len(self.feed_shape[0])], dtype="int32", value=2)
out = paddle.fluid.layers.expand(
x, expand_times=expand_times, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[2, 3, 1]) data = np.random.uniform(size=[2, 3, 1])
self.feed_fp32 = {'in_0': data.astype(np.float32)} self.feed_fp32 = {'in_0': data.astype(np.float32)}
...@@ -46,60 +42,23 @@ class TestBase(IPUOpTest): ...@@ -46,60 +42,23 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {'fill_value': 0.3, 'dtype': 'float32'} self.attrs = {'fill_value': 0.3, 'dtype': 'float32'}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED x_fill = paddle.full_like(x, **self.attrs)
startup_prog.random_seed = self.SEED out = paddle.fluid.layers.elementwise_add(x_fill, x_fill)
self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
x_fill = paddle.full_like(x, **self.attrs)
out = paddle.fluid.layers.elementwise_add(x_fill, x_fill)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) def run_model(self, exec_mode):
exe.run(startup_prog) self.run_op_test(exec_mode)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten() self.check()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,17 +30,14 @@ class TestBase(IPUOpTest): ...@@ -30,17 +30,14 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
self.feed = {} self.feed_fp32 = {}
self.feed_fp16 = {}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()] self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
...@@ -50,50 +47,21 @@ class TestBase(IPUOpTest): ...@@ -50,50 +47,21 @@ class TestBase(IPUOpTest):
'value': 0.3, 'value': 0.3,
} }
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.fluid.layers.fill_constant(**self.attrs)
startup_prog = paddle.static.Program() out = paddle.fluid.layers.elementwise_add(x, x)
main_prog.random_seed = self.SEED self.fetch_list = [out.name]
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.fluid.layers.fill_constant(**self.attrs)
out = paddle.fluid.layers.elementwise_add(x, x)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) def run_model(self, exec_mode):
exe.run(startup_prog) self.run_op_test(exec_mode)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten() self.check()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[2, 2, 4, 6]) data = np.random.uniform(size=[2, 2, 4, 6])
self.feed_fp32 = {"in_0": data.astype(np.float32)} self.feed_fp32 = {"in_0": data.astype(np.float32)}
...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest): ...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
self.attrs = {} self.attrs = {}
self.attrs['axis'] = 1 self.attrs['axis'] = 1
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED out = paddle.fluid.layers.flatten(x=x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope): def run_model(self, exec_mode):
with paddle.static.program_guard(main_prog, startup_prog): self.run_op_test(exec_mode)
x = paddle.static.data(
name=self.feed_list[0], def test(self):
shape=self.feed_shape[0], for m in IPUOpTest.ExecutionMode:
dtype='float32') if not self.skip_mode(m):
self.build_model()
out = paddle.fluid.layers.flatten(x=x, **self.attrs) self.run_model(m)
self.check()
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.check(output_dict, check_shape=True)
class TestCase1(TestBase): class TestCase1(TestBase):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册