未验证 提交 fe765cb3 编写于 作者: A Allen Guo 提交者: GitHub

[IPU] update ipu unittests p1 (#39923)

* update ipu UTs part1

* rename ut

* sync api changes

* update uts for new api

* update use_ipumodel()

* update use_ipumodel()

* split pr
上级 86effa0c
...@@ -16,14 +16,9 @@ import unittest ...@@ -16,14 +16,9 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode,
np_dtype_to_fluid_str) IPUOpTest)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,81 +27,88 @@ class TestBase(IPUOpTest): ...@@ -32,81 +27,88 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_op_attrs()
def set_feed(self): @property
self.feed = { def fp16_enabled(self):
"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32') return True
}
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'x': data.astype(np.float32)}
self.feed_fp16 = {'x': data.astype(np.float16)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
"dropout_prob": 0.5, "dropout_prob": 0.5,
"is_test": True, "is_test": True,
"dropout_implementation": "downgrade_in_infer" "dropout_implementation": "downgrade_in_infer"
} }
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype='float32')
dropout = paddle.fluid.layers.dropout(x, **self.attrs) dropout = paddle.fluid.layers.dropout(x, **self.attrs)
out = paddle.fluid.layers.elementwise_add(dropout, dropout) out = paddle.fluid.layers.elementwise_add(dropout, dropout)
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) feed = self.feed_fp32
return result[0] if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
def test_base(self): result = exe.run(program, feed=feed, fetch_list=fetch_list)
res0 = self._test_base(True) return result[0]
res1 = self._test_base(False)
self.assertTrue( def test(self):
np.allclose( output_dict = {}
res0.flatten(), res1.flatten(), atol=self.atol)) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(res0.shape == res1.shape) self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
def set_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
"dropout_prob": 0.5, "dropout_prob": 0.5,
"is_test": True, "is_test": True,
...@@ -115,7 +117,7 @@ class TestCase1(TestBase): ...@@ -115,7 +117,7 @@ class TestCase1(TestBase):
class TestCase2(TestBase): class TestCase2(TestBase):
def set_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
"dropout_prob": 0.0, "dropout_prob": 0.0,
"is_test": False, "is_test": False,
......
...@@ -16,14 +16,9 @@ import unittest ...@@ -16,14 +16,9 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import (ExecutionMode,
np_dtype_to_fluid_str) IPUOpTest)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,101 +27,136 @@ class TestMul(IPUOpTest): ...@@ -32,101 +27,136 @@ class TestMul(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.init_op() self.set_test_op()
@property
def fp16_enabled(self):
if IPUOpTest.use_ipumodel():
return False
else:
return True
def init_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.elementwise_mul self.op = paddle.fluid.layers.elementwise_mul
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() def _test_base(self, exec_mode):
] scope = paddle.static.Scope()
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype='float32')
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], name=self.feed_list[1],
shape=self.feed_shape[1], shape=self.feed_shape[1],
dtype=self.feed_dtype[1]) dtype='float32')
out = self.op(x, y, **self.attrs) out = self.op(x, y, **self.attrs)
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0] return result[0]
def run_test_base(self): def run_test_base(self):
res0 = self._test_base(True) output_dict = {}
res1 = self._test_base(False) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
self.assertTrue( break
np.allclose( output_dict[mode] = self._test_base(mode).flatten()
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape) self.check(output_dict)
def test_case0(self): def test_case0(self):
self.feed = { data_x = np.random.uniform(size=(2, 3, 4, 5))
"x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), data_y = np.random.uniform(size=(2, 3, 4, 5))
"y": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'),
self.feed_fp32 = {
"x": data_x.astype('float32'),
"y": data_y.astype('float32'),
}
self.feed_fp16 = {
"x": data_x.astype('float16'),
"y": data_y.astype('float16'),
} }
self.attrs = {} self.attrs = {}
self.set_feed_attr() self.set_feed_attr()
self.run_test_base() self.run_test_base()
def test_case1(self): def test_case1(self):
self.feed = { data_x = np.random.uniform(size=(2, 3, 4, 5))
"x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), data_y = np.random.uniform(size=(3, 4))
"y": np.random.uniform(size=(3, 4)).astype('float32'), self.feed_fp32 = {
"x": data_x.astype('float32'),
"y": data_y.astype('float32'),
}
self.feed_fp16 = {
"x": data_x.astype('float16'),
"y": data_y.astype('float16'),
} }
self.set_feed_attr() self.set_feed_attr()
self.attrs = {"axis": 1} self.attrs = {"axis": 1}
self.run_test_base() self.run_test_base()
def test_case2(self): def test_case2(self):
self.feed = { data_x = np.random.uniform(size=(2, 3, 4, 5))
"x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), data_y = np.random.uniform(size=(5))
"y": np.random.uniform(size=(5)).astype('float32'), self.feed_fp32 = {
"x": data_x.astype('float32'),
"y": data_y.astype('float32'),
}
self.feed_fp16 = {
"x": data_x.astype('float16'),
"y": data_y.astype('float16'),
} }
self.set_feed_attr() self.set_feed_attr()
self.attrs = {"axis": -1} self.attrs = {"axis": -1}
self.run_test_base() self.run_test_base()
def test_case3(self): def test_case3(self):
self.feed = { data_x = np.random.uniform(size=(2, 3, 4, 5))
"x": np.random.uniform(size=(2, 3, 4, 5)).astype('float32'), data_y = np.random.uniform(size=(2))
"y": np.random.uniform(size=(2)).astype('float32'), self.feed_fp32 = {
"x": data_x.astype('float32'),
"y": data_y.astype('float32'),
}
self.feed_fp16 = {
"x": data_x.astype('float16'),
"y": data_y.astype('float16'),
} }
self.set_feed_attr() self.set_feed_attr()
self.attrs = {"axis": 0} self.attrs = {"axis": 0}
...@@ -134,37 +164,43 @@ class TestMul(IPUOpTest): ...@@ -134,37 +164,43 @@ class TestMul(IPUOpTest):
class TestAdd(TestMul): class TestAdd(TestMul):
def init_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.elementwise_add self.op = paddle.fluid.layers.elementwise_add
class TestSub(TestMul): class TestSub(TestMul):
def init_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.elementwise_sub self.op = paddle.fluid.layers.elementwise_sub
class TestDiv(TestMul): class TestDiv(TestMul):
def init_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.elementwise_div self.op = paddle.fluid.layers.elementwise_div
class TestMin(TestMul): class TestMin(TestMul):
def init_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.elementwise_min self.op = paddle.fluid.layers.elementwise_min
class TestMax(TestMul): class TestMax(TestMul):
def init_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.elementwise_max self.op = paddle.fluid.layers.elementwise_max
class TestPow(TestMul): class TestPow(TestMul):
def init_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.elementwise_pow self.op = paddle.fluid.layers.elementwise_pow
class TestMod(TestMul): class TestMod(TestMul):
def init_op(self): def set_atol(self):
self.atol = 1e-7
self.rtol = 1e-5
self.atol_fp16 = 1e-2
self.rtol_fp16 = 1e-3
def set_test_op(self):
self.op = paddle.fluid.layers.elementwise_mod self.op = paddle.fluid.layers.elementwise_mod
......
...@@ -16,14 +16,8 @@ import unittest ...@@ -16,14 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
np_dtype_to_fluid_str)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,94 +26,106 @@ class TestBase(IPUOpTest): ...@@ -32,94 +26,106 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_op_attrs()
def set_feed(self): @property
self.feed = { def fp16_enabled(self):
"x": np.ones([1, 10]).astype('float32'), return True
"y": np.zeros([1, 10]).astype('float32'),
def set_data_feed(self):
x = np.ones([1, 10])
y = np.zeros([1, 10])
self.feed_fp32 = {
"x": x.astype(np.float32),
"y": y.astype(np.float32),
}
self.feed_fp16 = {
"x": x.astype(np.float16),
"y": y.astype(np.float16),
} }
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
# XX
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype='float32')
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], name=self.feed_list[1],
shape=self.feed_shape[1], shape=self.feed_shape[1],
dtype=self.feed_dtype[1]) dtype='float32')
out = paddle.fluid.layers.equal(x, y, **self.attrs) out = paddle.fluid.layers.equal(x, y, **self.attrs)
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) feed = self.feed_fp32
return result[0] if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
def test_base(self): result = exe.run(program, feed=feed, fetch_list=fetch_list)
res0 = self._test_base(True) return result[0]
res1 = self._test_base(False)
self.assertTrue( def test(self):
np.allclose( output_dict = {}
res0.flatten(), res1.flatten(), atol=self.atol)) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten().astype(np.int32)
self.assertTrue(res0.shape == res1.shape) self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
def set_feed(self): def set_data_feed(self):
self.feed = { x = np.ones([1, 10])
"x": np.ones([1, 10]).astype('float32'), y = np.ones([1, 10])
"y": np.ones([1, 10]).astype('float32'), self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
} self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
class TestCase2(TestBase): class TestCase2(TestBase):
def set_feed(self): def set_data_feed(self):
self.feed = { x = np.ones([1, 10])
"x": np.ones([1, 10]).astype('float32'), y = np.arange(0, 10).reshape([1, 10])
"y": np.arange(0, 10).reshape([1, 10]).astype('float32'), self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
} self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -16,14 +16,8 @@ import unittest ...@@ -16,14 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
np_dtype_to_fluid_str)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,125 +26,142 @@ class TestBase(IPUOpTest): ...@@ -32,125 +26,142 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_feed(self): def set_data_feed(self):
self.feed = {"x": np.random.uniform(size=[2, 3, 1]).astype('float32')} data = np.random.uniform(size=[2, 3, 1])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
self.feed_fp16 = {'in_0': data.astype(np.float16)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [ self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = {"expand_times": [1, 2, 2]} self.attrs = {"expand_times": [1, 2, 2]}
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype="float32")
out = paddle.fluid.layers.expand(x, **self.attrs) out = paddle.fluid.layers.expand(x, **self.attrs)
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) feed = self.feed_fp32
return result[0] if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
def test_base(self): result = exe.run(program, feed=feed, fetch_list=fetch_list)
res0 = self._test_base(False) return result[0]
res1 = self._test_base(True)
self.assertTrue( def test(self):
np.allclose( output_dict = {}
res0.flatten(), res1.flatten(), atol=self.atol)) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(res0.shape == res1.shape) self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
def set_feed(self): def set_data_feed(self):
self.feed = {"x": np.random.uniform(size=[2, 2]).astype('float32')} x = np.random.uniform(size=[2, 2])
self.feed_fp32 = {"x": x.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [ self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype="float32")
expand_times = fluid.layers.fill_constant(
expand_times = paddle.fluid.layers.fill_constant(
shape=[len(self.feed_shape[0])], dtype="int32", value=2) shape=[len(self.feed_shape[0])], dtype="int32", value=2)
out = paddle.fluid.layers.expand( out = paddle.fluid.layers.expand(
x, expand_times=expand_times, **self.attrs) x, expand_times=expand_times, **self.attrs)
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0] return result[0]
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Check `paddle.full_like` output parity across CPU fp32 / IPU fp32 / IPU popart fp16."""

    def setUp(self):
        # Prepare tolerances, training flag, input feeds and op attributes.
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        # The popart fp16 execution mode is exercised for this op.
        return True

    def set_data_feed(self):
        # The same random tensor is fed at both precisions under one key.
        raw = np.random.uniform(size=[2, 3, 1])
        self.feed_fp32 = {'in_0': raw.astype(np.float32)}
        self.feed_fp16 = {'in_0': raw.astype(np.float16)}

    def set_feed_attr(self):
        # Derive static-graph metadata from the fp32 feed dict.
        feeds = self.feed_fp32
        self.feed_shape = [value.shape for value in feeds.values()]
        self.feed_list = list(feeds.keys())

    def set_op_attrs(self):
        # Arguments forwarded to paddle.full_like.
        self.attrs = {'fill_value': 0.3, 'dtype': 'float32'}

    def _test_base(self, exec_mode):
        """Build and run the graph under *exec_mode*; return the fetched ndarray."""
        exec_scope = paddle.static.Scope()
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        main_program.random_seed = self.SEED
        startup_program.random_seed = self.SEED

        with paddle.static.scope_guard(exec_scope):
            with paddle.static.program_guard(main_program, startup_program):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                filled = paddle.full_like(x, **self.attrs)
                # Add the result to itself so the fill op feeds a real consumer.
                out = paddle.fluid.layers.elementwise_add(filled, filled)
                fetch_list = [out.name]

                on_cpu = exec_mode == ExecutionMode.CPU_FP32
                place = paddle.CPUPlace() if on_cpu else paddle.IPUPlace()
                exe = paddle.static.Executor(place)
                exe.run(startup_program)

                if on_cpu:
                    program = main_program
                else:
                    ipu_strategy = paddle.static.IpuStrategy()
                    ipu_strategy.set_graph_config(is_training=self.is_training)
                    if exec_mode == ExecutionMode.IPU_POPART_FP16:
                        ipu_strategy.set_precision_config(enable_fp16=True)
                    program = paddle.static.IpuCompiledProgram(
                        main_program, ipu_strategy=ipu_strategy).compile(
                            self.feed_list, fetch_list)

                # Half-precision modes take the fp16 copy of the feed.
                if exec_mode > ExecutionMode.IPU_FP32:
                    feed_dict = self.feed_fp16
                else:
                    feed_dict = self.feed_fp32
                res = exe.run(program, feed=feed_dict, fetch_list=fetch_list)
                return res[0]

    def test(self):
        # Collect one flattened output per execution mode and compare them.
        outputs = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            outputs[mode] = self._test_base(mode).flatten()

        self.check(outputs)
class TestCase1(TestBase):
    """Variant: integer fill value with an explicit int32 output dtype."""

    def set_op_attrs(self):
        self.attrs = dict(fill_value=3, dtype='int32')
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
...@@ -16,14 +16,8 @@ import unittest ...@@ -16,14 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
np_dtype_to_fluid_str)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,21 +26,23 @@ class TestBase(IPUOpTest): ...@@ -32,21 +26,23 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_feed(self): def set_data_feed(self):
self.feed = {} self.feed = {}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed.keys())
self.feed_dtype = [ self.feed_dtype = [x.dtype for x in self.feed.values()]
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
'name': 'x', 'name': 'x',
'shape': [1, 3, 3, 3], 'shape': [1, 3, 3, 3],
...@@ -54,33 +50,34 @@ class TestBase(IPUOpTest): ...@@ -54,33 +50,34 @@ class TestBase(IPUOpTest):
'value': 0.3, 'value': 0.3,
} }
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.fluid.layers.fill_constant(**self.attrs) x = paddle.fluid.layers.fill_constant(**self.attrs)
out = paddle.fluid.layers.elementwise_add(x, x) out = paddle.fluid.layers.elementwise_add(x, x)
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
...@@ -89,19 +86,18 @@ class TestBase(IPUOpTest): ...@@ -89,19 +86,18 @@ class TestBase(IPUOpTest):
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0] return result[0]
def test_base(self): def test(self):
res0 = self._test_base(False) output_dict = {}
res1 = self._test_base(True) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
self.assertTrue( break
np.allclose( output_dict[mode] = self._test_base(mode).flatten()
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape) self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
def set_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
'name': 'x', 'name': 'x',
'shape': [1, 3, 3, 3], 'shape': [1, 3, 3, 3],
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Round-trip save/load test for an fp16 model trained on IPU.

    The test trains a small conv network with fp16 enabled on the IPU,
    exports it via ``save_inference_model``, then reloads it and compares
    inference outputs between CPU (fp32 feed) and IPU (fp16 feed).
    """

    def setUp(self):
        self.set_atol()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_atol(self):
        # fp16 tolerances are looser than fp32 to absorb half-precision
        # rounding in the IPU results.
        self.atol = 1e-6
        self.rtol = 1e-5
        self.atol_fp16 = 1e-2
        self.rtol_fp16 = 1e-3

    def set_data_feed(self):
        # Single random input tensor, provided in both precisions so the
        # CPU (fp32) and IPU (fp16) runs start from the same values.
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['steps'] = 100
        self.attrs['save_at_step'] = 20
        self.attrs['is_training'] = True
        self.attrs['opt_type'] = 'sgd'
        # Use a unique temporary directory rather than a hard-coded
        # relative path ('model') so parallel test runs cannot collide and
        # no artifacts are left behind in the working directory.
        self.attrs['path'] = tempfile.TemporaryDirectory()
        self.attrs['model_name'] = 'test'

    def _test_save(self):
        """Build and train the network with fp16 on IPU, then export it.

        Side effect: sets ``self.full_name`` to the saved model prefix,
        which ``_test_load`` reads back.
        """
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED
        generator = paddle.fluid.unique_name.UniqueNameGenerator()
        self.full_name = '/'.join(
            [self.attrs['path'].name, self.attrs['model_name']])

        with paddle.fluid.unique_name.guard(generator):
            with paddle.static.scope_guard(scope):
                with paddle.static.program_guard(main_prog, startup_prog):
                    x = paddle.static.data(
                        name=self.feed_list[0],
                        shape=self.feed_shape[0],
                        dtype='float32')
                    scale = paddle.fluid.layers.scale(
                        x, scale=1.0, bias=0.0, bias_after_scale=True)
                    conv = paddle.static.nn.conv2d(
                        scale,
                        num_filters=3,
                        filter_size=3,
                        bias_attr=False,
                        name='conv2d')
                    loss = paddle.mean(conv)

                    if self.attrs['is_training']:
                        # Select the optimizer under test.
                        if self.attrs['opt_type'] == 'sgd':
                            sgd = paddle.optimizer.SGD(learning_rate=1e-2)
                            sgd.minimize(loss)
                        elif self.attrs['opt_type'] == 'adam':
                            adam = paddle.optimizer.Adam(learning_rate=1e-2)
                            adam.minimize(loss)
                        elif self.attrs['opt_type'] == 'lamb':
                            lamb = paddle.optimizer.Lamb(learning_rate=1e-2)
                            lamb.minimize(loss)
                    fetch_list = [loss.name]

                    place = paddle.IPUPlace()
                    exe = paddle.static.Executor(place)
                    exe.run(startup_prog)

                    ipu_strategy = paddle.static.IpuStrategy()
                    ipu_strategy.set_graph_config(is_training=True)
                    ipu_strategy.set_precision_config(enable_fp16=True)
                    program = paddle.static.IpuCompiledProgram(
                        main_prog, ipu_strategy=ipu_strategy).compile(
                            self.feed_list, fetch_list)

                    for _ in range(self.attrs['steps']):
                        exe.run(program,
                                feed=self.feed_fp16,
                                fetch_list=fetch_list)

                    # org_program is the uncompiled program backing the
                    # IPU-compiled one; that is what gets serialized.
                    paddle.static.save_inference_model(
                        self.full_name, x, loss, exe,
                        program=program.org_program)

    def _test_load(self, run_ipu):
        """Reload the saved model and run 10 inference steps.

        Args:
            run_ipu: True to execute on IPU with the fp16 feed, False to
                execute on CPU with the fp32 feed.

        Returns:
            np.ndarray stacking the 10 per-step fetch results.
        """
        if run_ipu:
            place = paddle.IPUPlace()
        else:
            place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)

        [inference_program, feed_target_names, fetch_targets] = (
            paddle.static.load_inference_model(self.full_name, exe))

        if run_ipu:
            feed_list = feed_target_names
            fetch_list = [fetch_targets[0].name]
            ipu_strategy = paddle.static.IpuStrategy()
            ipu_strategy.set_graph_config(is_training=False)
            ipu_strategy.set_precision_config(enable_fp16=True)
            program = paddle.static.IpuCompiledProgram(
                inference_program,
                ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
        else:
            program = inference_program

        feed = self.feed_fp16 if run_ipu else self.feed_fp32
        result = []
        for i in range(10):
            # NOTE: this mutates the shared feed dict in place on purpose,
            # so the CPU and IPU runs see identical input sequences.
            feed["in_0"] += np.array([1.1 * i]).astype(feed["in_0"].dtype)
            out = exe.run(program, feed=feed, fetch_list=[fetch_targets])
            result.append(out)

        return np.array(result)

    def test_base(self):
        self._test_save()
        cpu_res = self._test_load(False)
        ipu_res = self._test_load(True).astype(np.float32)

        self.assertTrue(
            np.allclose(
                cpu_res, ipu_res, rtol=self.rtol_fp16, atol=self.atol_fp16))

        # Remove the temporary model directory created in set_op_attrs.
        self.attrs['path'].cleanup()
# Run this module's tests when executed directly.
if __name__ == "__main__":
    unittest.main()
...@@ -16,14 +16,8 @@ import unittest ...@@ -16,14 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
np_dtype_to_fluid_str)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,85 +26,92 @@ class TestBase(IPUOpTest): ...@@ -32,85 +26,92 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_feed(self): def set_data_feed(self):
self.feed = { x = np.random.uniform(size=[10, 20])
"x": np.random.uniform(size=[10, 20]).astype('float32'), y = np.array([1, 3, 5])
"y": np.array([1, 3, 5]).astype('int32'), self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.int32)}
} self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.int32)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype='float32')
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], name=self.feed_list[1],
shape=self.feed_shape[1], shape=self.feed_shape[1],
dtype=self.feed_dtype[1]) dtype='int32')
out = paddle.fluid.layers.gather(x, index=y, **self.attrs) out = paddle.fluid.layers.gather(x, index=y, **self.attrs)
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) feed = self.feed_fp32
return result[0] if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
def test_base(self): result = exe.run(program, feed=feed, fetch_list=fetch_list)
res0 = self._test_base(False) return result[0]
res1 = self._test_base(True)
self.assertTrue( def test(self):
np.allclose( output_dict = {}
res0.flatten(), res1.flatten(), atol=self.atol)) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(res0.shape == res1.shape) self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
def set_feed(self): def set_data_feed(self):
self.feed = { x = np.random.uniform(size=[100])
"x": np.random.uniform(size=[100]).astype('float32'), y = np.array([1, 3, 5])
"y": np.array([1, 3, 5]).astype('int32'), self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.int32)}
} self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.int32)}
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -16,14 +16,8 @@ import unittest ...@@ -16,14 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
np_dtype_to_fluid_str)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,80 +26,89 @@ class TestBase(IPUOpTest): ...@@ -32,80 +26,89 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_op_attrs()
def set_atol(self): @property
self.atol = 1e-3 def fp16_enabled(self):
return True
def set_feed(self): def set_data_feed(self):
self.feed = { data = np.random.uniform(size=[1, 3, 10, 10])
"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32') self.feed_fp32 = {'in_0': data.astype(np.float32)}
} self.feed_fp16 = {'in_0': data.astype(np.float16)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = {"approximate": False} self.attrs = {"approximate": False}
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype='float32')
out = paddle.fluid.layers.gelu(x, **self.attrs) out = paddle.fluid.layers.gelu(x, **self.attrs)
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) feed = self.feed_fp32
return result[0] if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
def test_base(self): result = exe.run(program, feed=feed, fetch_list=fetch_list)
res0 = self._test_base(False) return result[0]
res1 = self._test_base(True)
self.assertTrue( def test(self):
np.allclose( output_dict = {}
res0.flatten(), res1.flatten(), atol=self.atol)) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(res0.shape == res1.shape) self.check(output_dict)
@unittest.skip('approximate=True is not supported')
class TestCase1(TestBase): class TestCase1(TestBase):
def set_attrs(self): def set_atol(self):
self.atol = 1e-10
self.rtol = 1e-6
self.atol_fp16 = 2e-3
self.rtol_fp16 = 1e-3
def set_op_attrs(self):
self.attrs = {"approximate": True} self.attrs = {"approximate": True}
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Compares ``greater_than`` outputs across CPU fp32, IPU fp32 and
    IPU popart-fp16 execution modes."""

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        # Enables the IPU_POPART_FP16 mode in test().
        return True

    def set_data_feed(self):
        # Same random operands in both precisions so every execution mode
        # compares the same values.
        x = np.random.randn(3, 4, 5)
        y = np.random.randn(3, 4, 5)
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "y": y.astype(np.float32),
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "y": y.astype(np.float16),
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        # greater_than takes no extra attributes in the base case.
        self.attrs = {}

    def _test_base(self, exec_mode):
        """Build and run the graph under the given ExecutionMode.

        Returns the first fetched result (the elementwise comparison
        output, boolean-typed).
        """
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype='float32')
                out = paddle.fluid.layers.greater_than(x, y, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                # Both IPU modes compile the program; fp16 additionally
                # enables popart half-precision.
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            # Modes above IPU_FP32 receive the half-precision feed.
            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test(self):
        """Runs every execution mode and cross-checks the outputs."""
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            # Cast the boolean output to int32 so check() can compare it
            # numerically across modes.
            output_dict[mode] = self._test_base(mode).flatten().astype(
                np.int32)

        self.check(output_dict)
class TestCase1(TestBase):
    """Broadcast case: operands of different rank with equal values."""

    def set_data_feed(self):
        lhs = np.ones([1, 10])
        rhs = np.ones([10])
        self.feed_fp32 = {
            "x": lhs.astype(np.float32),
            "y": rhs.astype(np.float32),
        }
        self.feed_fp16 = {
            "x": lhs.astype(np.float16),
            "y": rhs.astype(np.float16),
        }
class TestCase2(TestBase):
    """All-true case: x (ones) is strictly greater than y (zeros)."""

    def set_data_feed(self):
        lhs = np.ones([1, 10])
        rhs = np.zeros([1, 10])
        self.feed_fp32 = {
            "x": lhs.astype(np.float32),
            "y": rhs.astype(np.float32),
        }
        self.feed_fp16 = {
            "x": lhs.astype(np.float16),
            "y": rhs.astype(np.float16),
        }
class TestCase3(TestBase):
    """All-false case: x (zeros) is never greater than y (ones)."""

    def set_data_feed(self):
        lhs = np.zeros([1, 10])
        rhs = np.ones([1, 10])
        self.feed_fp32 = {
            "x": lhs.astype(np.float32),
            "y": rhs.astype(np.float32),
        }
        self.feed_fp16 = {
            "x": lhs.astype(np.float16),
            "y": rhs.astype(np.float16),
        }
# Run this module's tests when executed directly.
if __name__ == "__main__":
    unittest.main()
...@@ -16,14 +16,8 @@ import unittest ...@@ -16,14 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
np_dtype_to_fluid_str)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,43 +26,49 @@ class TestBase(IPUOpTest): ...@@ -32,43 +26,49 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_op_attrs()
def set_feed(self): @property
self.feed = { def fp16_enabled(self):
"x": np.random.uniform(size=[1, 8, 10, 10]).astype('float32'), return True
}
def set_atol(self):
self.atol = 3e-6
self.rtol = 1e-6
self.atol_fp16 = 4e-3
self.rtol_fp16 = 1e-3
def set_data_feed(self):
data = np.random.uniform(size=[1, 8, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
self.feed_fp16 = {'in_0': data.astype(np.float16)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
"groups": 8, "groups": 8,
"epsilon": 1e-05, "epsilon": 1e-05,
"data_layout": 'NCHW', "data_layout": 'NCHW',
} }
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype='float32')
if self.is_training: if self.is_training:
ch = self.feed_shape[0][1] ch = self.feed_shape[0][1]
...@@ -78,62 +78,68 @@ class TestBase(IPUOpTest): ...@@ -78,62 +78,68 @@ class TestBase(IPUOpTest):
bias = paddle.ParamAttr(trainable=True) bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.group_norm( out = paddle.fluid.layers.nn.group_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs) conv1, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
else: else:
scale = True
bias = True
out = paddle.fluid.layers.nn.group_norm( out = paddle.fluid.layers.nn.group_norm(
x, param_attr=scale, bias_attr=bias, **self.attrs) x, param_attr=True, bias_attr=True, **self.attrs)
if self.is_training: if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
fetch_list = [loss.name] fetch_list = [loss.name]
else: else:
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
if self.is_training: if self.is_training:
result = [] result = []
for _ in range(self.epoch): for _ in range(self.epoch):
loss_res = exe.run(program, loss_res = exe.run(program,
feed=self.feed, feed=feed,
fetch_list=fetch_list) fetch_list=fetch_list)
result.append(loss_res[0]) result.append(loss_res[0])
return np.array(result) return np.array(result)
else: else:
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0] return result[0]
def test_base(self): def test_base(self):
res0 = self._test_base(False) output_dict = {}
res1 = self._test_base(True) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
if mode > ExecutionMode.IPU_FP32 and self.is_training:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue( self.check(output_dict)
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
class TestCase1(TestBase): class TestCase1(TestBase):
def set_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
"groups": 4, "groups": 4,
"epsilon": 1e-05, "epsilon": 1e-05,
...@@ -147,11 +153,15 @@ class TestTrainCase1(TestBase): ...@@ -147,11 +153,15 @@ class TestTrainCase1(TestBase):
self.epoch = 10 self.epoch = 10
@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
class TestTrainCase2(TestBase): class TestTrainCase2(TestBase):
def set_atol(self): def set_atol(self):
self.atol = 1e-3 self.atol = 7e-4
self.rtol = 1e-6
self.atol_fp16 = 4e-3
self.rtol_fp16 = 1e-3
def set_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
"groups": 4, "groups": 4,
"epsilon": 1e-05, "epsilon": 1e-05,
...@@ -163,7 +173,5 @@ class TestTrainCase2(TestBase): ...@@ -163,7 +173,5 @@ class TestTrainCase2(TestBase):
self.epoch = 10 self.epoch = 10
# not support `group_norm(x, param_attr=False, bias_attr=False, **self.attrs)`
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
...@@ -12,59 +12,59 @@ ...@@ -12,59 +12,59 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import tempfile
import unittest import unittest
import shutil
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU") "core is not compiled with IPU")
class TestBase(IPUOpTest): class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_feed() self.set_data_feed()
self.set_attrs() self.set_feed_attr()
self.set_op_attrs()
def set_feed(self):
self.feed_shape = [] def set_atol(self):
self.feed_shape.append([1, 3, 10, 10]) self.atol = 1e-6
self.rtol = 1e-5
self.feed = {} self.atol_fp16 = 1e-2
self.feed["in_0"] = np.random.uniform( self.rtol_fp16 = 1e-3
size=self.feed_shape[0]).astype(np.float32)
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed = {"in_0": data.astype(np.float32)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed.keys())
def set_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
self.attrs['steps'] = 100 self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20 self.attrs['save_at_step'] = 20
self.attrs['is_training'] = True self.attrs['is_training'] = True
self.attrs['opt_type'] = 'sgd' self.attrs['opt_type'] = 'sgd'
self.attrs['path'] = 'model' self.attrs['path'] = tempfile.TemporaryDirectory()
self.attrs['model_name'] = 'test' self.attrs['model_name'] = 'test'
def _test_save(self): def _test_save(self):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED startup_prog.random_seed = self.SEED
generator = fluid.unique_name.UniqueNameGenerator() generator = paddle.fluid.unique_name.UniqueNameGenerator()
self.full_name = '/'.join( self.full_name = '/'.join(
[self.attrs['path'], self.attrs['model_name']]) [self.attrs['path'].name, self.attrs['model_name']])
with fluid.unique_name.guard(generator): with paddle.fluid.unique_name.guard(generator):
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
...@@ -88,16 +88,16 @@ class TestBase(IPUOpTest): ...@@ -88,16 +88,16 @@ class TestBase(IPUOpTest):
elif self.attrs['opt_type'] == 'lamb': elif self.attrs['opt_type'] == 'lamb':
lamb = paddle.optimizer.Lamb(learning_rate=1e-2) lamb = paddle.optimizer.Lamb(learning_rate=1e-2)
lamb.minimize(loss) lamb.minimize(loss)
fetch_list = [loss.name] fetch_list = [loss.name]
place = paddle.IPUPlace() place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig( ipu_strategy.set_graph_config(
is_training=self.attrs['is_training']) is_training=self.attrs['is_training'])
program = compiler.IPUCompiledProgram( program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy).compile( main_prog, ipu_strategy=ipu_strategy).compile(
self.feed_list, fetch_list) self.feed_list, fetch_list)
...@@ -125,8 +125,8 @@ class TestBase(IPUOpTest): ...@@ -125,8 +125,8 @@ class TestBase(IPUOpTest):
feed_list = feed_target_names feed_list = feed_target_names
fetch_list = [fetch_targets[0].name] fetch_list = [fetch_targets[0].name]
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=False) ipu_strategy.set_graph_config(is_training=False)
program = compiler.IPUCompiledProgram( program = paddle.static.IpuCompiledProgram(
inference_program, inference_program,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
...@@ -134,7 +134,7 @@ class TestBase(IPUOpTest): ...@@ -134,7 +134,7 @@ class TestBase(IPUOpTest):
tmp = exe.run(program, feed=self.feed, fetch_list=[fetch_targets]) tmp = exe.run(program, feed=self.feed, fetch_list=[fetch_targets])
return tmp return np.array(tmp)
def test_base(self): def test_base(self):
self._test_save() self._test_save()
...@@ -142,27 +142,26 @@ class TestBase(IPUOpTest): ...@@ -142,27 +142,26 @@ class TestBase(IPUOpTest):
ipu_res = self._test_load(True) ipu_res = self._test_load(True)
self.assertTrue(np.allclose(cpu_res, ipu_res, atol=self.atol)) self.assertTrue(np.allclose(cpu_res, ipu_res, atol=self.atol))
self.attrs['path'].cleanup()
shutil.rmtree(self.attrs['path'], True)
class TestAdam(TestBase): class TestAdam(TestBase):
def set_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
self.attrs['steps'] = 100 self.attrs['steps'] = 100
self.attrs['is_training'] = True self.attrs['is_training'] = True
self.attrs['opt_type'] = 'adam' self.attrs['opt_type'] = 'adam'
self.attrs['path'] = 'model' self.attrs['path'] = tempfile.TemporaryDirectory()
self.attrs['model_name'] = 'test' self.attrs['model_name'] = 'test'
class TestLamb(TestBase): class TestLamb(TestBase):
def set_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
self.attrs['steps'] = 100 self.attrs['steps'] = 100
self.attrs['is_training'] = True self.attrs['is_training'] = True
self.attrs['opt_type'] = 'lamb' self.attrs['opt_type'] = 'lamb'
self.attrs['path'] = 'model' self.attrs['path'] = tempfile.TemporaryDirectory()
self.attrs['model_name'] = 'test' self.attrs['model_name'] = 'test'
......
...@@ -16,14 +16,8 @@ import unittest ...@@ -16,14 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest, from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
np_dtype_to_fluid_str)
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,39 +26,45 @@ class TestBase(IPUOpTest): ...@@ -32,39 +26,45 @@ class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
self.set_atol() self.set_atol()
self.set_training() self.set_training()
self.set_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self):
self.atol = 1e-6
self.rtol = 1e-5
self.atol_fp16 = 1e-2
self.rtol_fp16 = 1e-3
def set_feed(self): def set_data_feed(self):
self.feed = { x = np.random.uniform(size=[1, 3, 10, 10])
"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'), self.feed_fp32 = {"x": x.astype(np.float32)}
} self.feed_fp16 = {"x": x.astype(np.float16)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def set_attrs(self): def set_op_attrs(self):
self.attrs = {"epsilon": 1e-05} self.attrs = {"epsilon": 1e-05}
def _test_base(self, run_ipu=True): def _test_base(self, exec_mode):
scope = fluid.core.Scope() scope = paddle.static.Scope()
main_prog = paddle.static.Program() main_prog = paddle.static.Program()
startup_prog = paddle.static.Program() startup_prog = paddle.static.Program()
SEED = self.SEED main_prog.random_seed = self.SEED
main_prog.random_seed = SEED startup_prog.random_seed = self.SEED
startup_prog.random_seed = SEED
with fluid.scope_guard(scope): with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
dtype=self.feed_dtype[0]) dtype='float32')
if self.is_training: if self.is_training:
ch = self.feed_shape[0][1] ch = self.feed_shape[0][1]
...@@ -74,58 +74,64 @@ class TestBase(IPUOpTest): ...@@ -74,58 +74,64 @@ class TestBase(IPUOpTest):
bias = paddle.ParamAttr(trainable=True) bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.instance_norm( out = paddle.fluid.layers.nn.instance_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs) conv1, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
else: else:
scale = True
bias = True
out = paddle.fluid.layers.nn.instance_norm( out = paddle.fluid.layers.nn.instance_norm(
x, param_attr=scale, bias_attr=bias, **self.attrs) x, param_attr=True, bias_attr=True, **self.attrs)
if self.is_training: if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
fetch_list = [loss.name] fetch_list = [loss.name]
else: else:
fetch_list = [out.name] fetch_list = [out.name]
if run_ipu: if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
if run_ipu: if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = compiler.IPUCompiledProgram( if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
if self.is_training: if self.is_training:
result = [] result = []
for _ in range(self.epoch): for _ in range(self.epoch):
loss_res = exe.run(program, loss_res = exe.run(program,
feed=self.feed, feed=feed,
fetch_list=fetch_list) fetch_list=fetch_list)
result.append(loss_res) result.append(loss_res)
return np.array(result) return np.array(result)
else: else:
result = exe.run(program, feed=self.feed, fetch_list=fetch_list) result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0] return result[0]
def test_base(self): def test(self):
res0 = self._test_base(False) output_dict = {}
res1 = self._test_base(True) for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
if mode > ExecutionMode.IPU_FP32 and self.is_training:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue( self.check(output_dict)
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
class TestTrainCase1(TestBase): class TestTrainCase1(TestBase):
...@@ -134,7 +140,5 @@ class TestTrainCase1(TestBase): ...@@ -134,7 +140,5 @@ class TestTrainCase1(TestBase):
self.epoch = 10 self.epoch = 10
# not support `instance_norm(x, param_attr=False, bias_attr=False, **self.attrs)`
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册