Commit 86effa0c (unverified), authored by Allen Guo, committed by GitHub

[IPU] update ipu unittests p3 (#40072)

* update ipu UTs part3

* rename uts

* sync api changes

* update uts for new api

* update use_ipumodel()

* split pr
Parent: b5a8a0d9
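The diffs below apply one recurring migration: each test's feed is split into FP32/FP16 variants, an ExecutionMode loop replaces the old run_ipu flag, and the IpuStrategy/compiler calls move to the snake_case paddle.static API. As a rough, hedged sketch of that strategy setup (not part of the commit; main_prog, feed_list and fetch_list are placeholders, and it only runs on a Paddle build compiled with IPU support):

import paddle
import paddle.static

paddle.enable_static()
main_prog = paddle.static.Program()
feed_list, fetch_list = [], []  # placeholders for the per-test feed/fetch names

ipu_strategy = paddle.static.IpuStrategy()
# Old spellings removed by this PR:
#   ipu_strategy.SetGraphConfig(is_training=False)
#   program = paddle.fluid.compiler.IPUCompiledProgram(...)
ipu_strategy.set_graph_config(is_training=False)      # renamed setter
ipu_strategy.set_precision_config(enable_fp16=True)   # enables the popart fp16 path
program = paddle.static.IpuCompiledProgram(
    main_prog, ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)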
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.random.uniform(size=[2, 3])
y = np.random.uniform(size=[3, 2])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {"transpose_x": False, "transpose_y": False}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = paddle.matmul(x, y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestCase1(TestBase):
def set_op_attrs(self):
self.attrs = {
"transpose_x": True,
"transpose_y": True,
}
class TestCase3(TestBase):
def set_data_feed(self):
x = np.random.uniform(size=[5, 4, 2, 3])
y = np.random.uniform(size=[5, 4, 3, 2])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
class TestCase4(TestBase):
def set_data_feed(self):
x = np.random.uniform(size=[4, 2, 3])
y = np.random.uniform(size=[4, 3, 2])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
class TestCase5(TestBase):
def set_data_feed(self):
x = np.random.uniform(size=[4, 2, 3])
y = np.random.uniform(size=[3, 2])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
class TestCase6(TestBase):
def set_data_feed(self):
x = np.random.uniform(size=[3])
y = np.random.uniform(size=[3])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
@unittest.skip("not supported")
class TestCase6_2(TestCase6):
def set_data_feed(self):
x = np.random.uniform(size=[3])
y = np.random.uniform(size=[3])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
def set_op_attrs(self):
self.attrs = {"transpose_x": True, "transpose_y": True}
class TestCase7(TestBase):
def set_data_feed(self):
x = np.random.uniform(size=[3, 1])
y = np.random.uniform(size=[1, 2])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
@unittest.skip("dim > 4 is not supported")
class TestCase8(TestBase):
def set_data_feed(self):
self.feed = {
"x": np.random.uniform(size=[6, 5, 4, 2, 3]).astype('float32'),
"y": np.random.uniform(size=[6, 5, 4, 3, 2]).astype('float32'),
}
if __name__ == "__main__":
unittest.main()
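The comparisons above such as mode > ExecutionMode.IPU_FP32 assume ExecutionMode is an ordered enum iterated from the CPU mode up to the FP16 IPU mode. The real definition lives in op_test_ipu.py; a minimal stand-in with the same member names and ordering semantics (an assumption, shown only to make the comparisons concrete) would be:

from enum import IntEnum

class ExecutionMode(IntEnum):
    CPU_FP32 = 1
    IPU_FP32 = 2
    IPU_POPART_FP16 = 3  # anything above IPU_FP32 feeds float16 data

for mode in ExecutionMode:
    print(mode.name, "uses fp16 feed:", mode > ExecutionMode.IPU_FP32)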
@@ -16,13 +16,8 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
-
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode

 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -31,97 +26,79 @@ class TestBase(IPUOpTest):
     def setUp(self):
         self.set_atol()
         self.set_training()
-        self.set_feed()
-        self.set_attrs()
+        self.set_data_feed()
+        self.set_feed_attr()
+        self.set_op_attrs()

-    def set_feed(self):
-        self.feed_shape = []
-        self.feed_shape.append([1, 3, 10, 10])
-        self.feed = {}
-        self.feed["in_0"] = np.random.uniform(
-            size=self.feed_shape[0]).astype(np.float32)
-        self.feed_list = list(self.feed.keys())
+    @property
+    def fp16_enabled(self):
+        return True
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[1, 3, 10, 10])
+        self.feed_fp32 = {"in_0": data.astype(np.float32)}
+        self.feed_fp16 = {"in_0": data.astype(np.float16)}
+
+    def set_feed_attr(self):
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {}
-        self.attrs['axis'] = None
-        self.attrs['keepdim'] = False

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
                     dtype='float32')
-                out = paddle.mean(x, **self.attrs)
+                out = paddle.fluid.layers.mean(x)

                 fetch_list = [out.name]

-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
+            if exec_mode == ExecutionMode.CPU_FP32:
                 place = paddle.CPUPlace()
+            else:
+                place = paddle.IPUPlace()
+
             exe = paddle.static.Executor(place)
             exe.run(startup_prog)

-            if run_ipu:
+            if exec_mode != ExecutionMode.CPU_FP32:
                 feed_list = self.feed_list
                 ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IPUCompiledProgram(
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+                if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                    ipu_strategy.set_precision_config(enable_fp16=True)
+                program = paddle.static.IpuCompiledProgram(
                     main_prog,
                     ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
             else:
                 program = main_prog

-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            feed = self.feed_fp32
+            if exec_mode > ExecutionMode.IPU_FP32:
+                feed = self.feed_fp16
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]

     def test_base(self):
-        res0 = self._test_base(True)
-        res1 = self._test_base(False)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-
-
-class TestCase1(TestBase):
-    def set_attrs(self):
-        self.attrs = {}
-        self.attrs['axis'] = 1
-        self.attrs['keepdim'] = False
-
-
-class TestCase2(TestBase):
-    def set_attrs(self):
-        self.attrs = {}
-        self.attrs['axis'] = 2
-        self.attrs['keepdim'] = False
-
-
-class TestCase3(TestBase):
-    def set_attrs(self):
-        self.attrs = {}
-        self.attrs['axis'] = 2
-        self.attrs['keepdim'] = True
-
-
-class TestCase4(TestBase):
-    def set_attrs(self):
-        self.attrs = {}
-        self.attrs['axis'] = None
-        self.attrs['keepdim'] = True
+        output_dict = {}
+        for mode in ExecutionMode:
+            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+                break
+            output_dict[mode] = self._test_base(mode).flatten()
+
+        self.check(output_dict)

 if __name__ == "__main__":
...
@@ -17,8 +17,7 @@ from __future__ import print_function
 import numpy as np
 import unittest
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
+import paddle.static

 paddle.enable_static()
 SEED = 2021
@@ -28,7 +27,7 @@ SEED = 2021
                  "core is not compiled with IPU")
 class TestCastNet(unittest.TestCase):
     def _test(self, run_ipu=True):
-        scope = fluid.core.Scope()
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
         main_prog.random_seed = SEED
@@ -37,14 +36,14 @@ class TestCastNet(unittest.TestCase):
         np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 image = paddle.static.data(
                     name='image', shape=[1, 3, 10, 10], dtype='float32')
-                with fluid.ipu_shard(ipu_index=0):
+                with paddle.static.ipu_shard_guard(index=0):
                     conv1 = paddle.static.nn.conv2d(
                         image, num_filters=3, filter_size=3, bias_attr=False)
-                with fluid.ipu_shard(ipu_index=1):
+                with paddle.static.ipu_shard_guard(index=1):
                     conv2 = paddle.static.nn.conv2d(
                         conv1, num_filters=3, filter_size=3, bias_attr=False)
                 loss = paddle.mean(conv2)
@@ -60,9 +59,10 @@ class TestCastNet(unittest.TestCase):
             feed_list = [image.name]
             fetch_list = [loss.name]
             ipu_strategy = paddle.static.IpuStrategy()
-            ipu_strategy.SetGraphConfig(
+            ipu_strategy.set_graph_config(
                 num_ipus=2, is_training=False, enable_manual_shard=True)
-            program = compiler.IPUCompiledProgram(
+            ipu_strategy.set_pipelining_config(enable_pipelining=False)
+            program = paddle.static.IpuCompiledProgram(
                 main_prog,
                 ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
         else:
...
@@ -16,14 +16,8 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode

 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,90 +26,98 @@ class TestBase(IPUOpTest):
     def setUp(self):
         self.set_atol()
         self.set_training()
-        self.set_feed()
+        self.set_data_feed()
         self.set_feed_attr()
-        self.set_attrs()
+        self.set_op_attrs()

-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[2, 5]).astype('float32'),
-            "y": np.random.uniform(size=[5, 3]).astype('float32'),
-        }
+    @property
+    def fp16_enabled(self):
+        return True
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[2, 5])
+        y = np.random.uniform(size=[5, 3])
+        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}

     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-        ]
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {
             "x_num_col_dims": 1,
             "y_num_col_dims": 1,
         }

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
+                    dtype='float32')
                 y = paddle.static.data(
                     name=self.feed_list[1],
                     shape=self.feed_shape[1],
-                    dtype=self.feed_dtype[1])
+                    dtype='float32')
                 out = paddle.fluid.layers.mul(x, y, **self.attrs)

                 fetch_list = [out.name]

-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
+            if exec_mode == ExecutionMode.CPU_FP32:
                 place = paddle.CPUPlace()
+            else:
+                place = paddle.IPUPlace()
+
             exe = paddle.static.Executor(place)
             exe.run(startup_prog)

-            if run_ipu:
+            if exec_mode != ExecutionMode.CPU_FP32:
                 feed_list = self.feed_list
                 ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IPUCompiledProgram(
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+                if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                    ipu_strategy.set_precision_config(enable_fp16=True)
+                program = paddle.static.IpuCompiledProgram(
                     main_prog,
                     ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
             else:
                 program = main_prog

-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            feed = self.feed_fp32
+            if exec_mode > ExecutionMode.IPU_FP32:
+                feed = self.feed_fp16
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]

     def test_base(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-
-        self.assertTrue(res0.shape == res1.shape)
+        output_dict = {}
+        for mode in ExecutionMode:
+            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+                break
+            output_dict[mode] = self._test_base(mode).flatten()
+
+        self.check(output_dict)

 class TestCase1(TestBase):
-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 2, 5]).astype('float32'),
-            "y": np.random.uniform(size=[5, 3]).astype('float32'),
-        }
+    def set_data_feed(self):
+        x = np.random.uniform(size=[1, 2, 5])
+        y = np.random.uniform(size=[5, 3])
+        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {
             "x_num_col_dims": 2,
             "y_num_col_dims": 1,
@@ -123,13 +125,13 @@ class TestCase1(TestBase):

 class TestCase2(TestBase):
-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[3, 4, 2, 9]).astype('float32'),
-            "y": np.random.uniform(size=[3, 6, 1, 2, 3]).astype('float32'),
-        }
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 4, 2, 9])
+        y = np.random.uniform(size=[3, 6, 1, 2, 3])
+        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {
             'x_num_col_dims': 2,
             'y_num_col_dims': 2,
...
@@ -16,14 +16,8 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode

 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,23 +26,25 @@ class TestBase(IPUOpTest):
     def setUp(self):
         self.set_atol()
         self.set_training()
-        self.set_feed()
+        self.set_data_feed()
         self.set_feed_attr()
-        self.set_attrs()
+        self.set_op_attrs()

-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
-        }
+    @property
+    def fp16_enabled(self):
+        return True
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[1, 3, 10, 10])
+        self.feed_fp32 = {'in_0': data.astype(np.float32)}
+        self.feed_fp16 = {'in_0': data.astype(np.float16)}

     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-        ]
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {
             "pool_size": 3,
             "pool_type": 'avg',
@@ -60,53 +56,59 @@ class TestBase(IPUOpTest):
             "data_format": 'NCHW',
         }

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
+                    dtype='float32')
                 out = paddle.fluid.layers.pool2d(x, **self.attrs)

                 fetch_list = [out.name]

-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
+            if exec_mode == ExecutionMode.CPU_FP32:
                 place = paddle.CPUPlace()
+            else:
+                place = paddle.IPUPlace()
+
             exe = paddle.static.Executor(place)
             exe.run(startup_prog)

-            if run_ipu:
+            if exec_mode != ExecutionMode.CPU_FP32:
                 feed_list = self.feed_list
                 ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IPUCompiledProgram(
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+                if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                    ipu_strategy.set_precision_config(enable_fp16=True)
+                program = paddle.static.IpuCompiledProgram(
                     main_prog,
                     ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
             else:
                 program = main_prog

-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            feed = self.feed_fp32
+            if exec_mode > ExecutionMode.IPU_FP32:
+                feed = self.feed_fp16
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]

     def test_base(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-
-        self.assertTrue(res0.shape == res1.shape)
+        output_dict = {}
+        for mode in ExecutionMode:
+            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+                break
+            output_dict[mode] = self._test_base(mode).flatten()
+
+        self.check(output_dict)

 class TestCase1(TestBase):
...
@@ -16,14 +16,8 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode

 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,23 +26,25 @@ class TestBase(IPUOpTest):
     def setUp(self):
         self.set_atol()
         self.set_training()
-        self.set_feed()
+        self.set_data_feed()
         self.set_feed_attr()
-        self.set_attrs()
+        self.set_op_attrs()

-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
-        }
+    @property
+    def fp16_enabled(self):
+        return True
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[1, 3, 10, 10])
+        self.feed_fp32 = {'in_0': data.astype(np.float32)}
+        self.feed_fp16 = {'in_0': data.astype(np.float16)}

     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-        ]
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {
             "pool_size": 3,
             "pool_type": 'max',
@@ -60,120 +56,126 @@ class TestBase(IPUOpTest):
             "data_format": 'NCHW',
         }

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
+                    dtype='float32')
                 out = paddle.fluid.layers.pool2d(x, **self.attrs)

                 fetch_list = [out.name]

-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
+            if exec_mode == ExecutionMode.CPU_FP32:
                 place = paddle.CPUPlace()
+            else:
+                place = paddle.IPUPlace()
+
             exe = paddle.static.Executor(place)
             exe.run(startup_prog)

-            if run_ipu:
+            if exec_mode != ExecutionMode.CPU_FP32:
                 feed_list = self.feed_list
                 ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IPUCompiledProgram(
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+                if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                    ipu_strategy.set_precision_config(enable_fp16=True)
+                program = paddle.static.IpuCompiledProgram(
                     main_prog,
                     ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
             else:
                 program = main_prog

-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            feed = self.feed_fp32
+            if exec_mode > ExecutionMode.IPU_FP32:
+                feed = self.feed_fp16
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]

     def test_base(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-
-        self.assertTrue(res0.shape == res1.shape)
+        output_dict = {}
+        for mode in ExecutionMode:
+            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+                break
+            output_dict[mode] = self._test_base(mode).flatten()
+
+        self.check(output_dict)

 class TestCase1(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['pool_size'] = 3

 class TestCase1_2(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['pool_size'] = [3, 1]

 class TestCase2(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['pool_stride'] = 2

 class TestCase2_2(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['pool_stride'] = [2, 1]

 class TestCase3(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['pool_padding'] = [1, 1]

 class TestCase3_2(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['pool_padding'] = [1, 1, 2, 2]

 @unittest.skip('auto_pad is not currently supported')
 class TestCase3_3(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['pool_padding'] = 'VALID'

 @unittest.skip('auto_pad is not currently supported')
 class TestCase3_4(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['pool_padding'] = 'SAME'

 class TestCase4(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['global_pooling'] = True

 class TestCase5(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['ceil_mode'] = True

 class TestCase6(TestBase):
-    def set_attrs(self):
-        super().set_attrs()
+    def set_op_attrs(self):
+        super().set_op_attrs()
         self.attrs['exclusive'] = False
...
@@ -16,14 +16,8 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode

 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,124 +26,146 @@ class TestBase(IPUOpTest):
     def setUp(self):
         self.set_atol()
         self.set_training()
-        self.set_feed()
+        self.set_data_feed()
         self.set_feed_attr()
-        self.set_attrs()
+        self.set_op_attrs()

-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 3, 2, 2]).astype('float32'),
-        }
+    @property
+    def fp16_enabled(self):
+        return True
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[1, 3, 2, 2])
+        self.feed_fp32 = {"x": data.astype(np.float32)}
+        self.feed_fp16 = {"x": data.astype(np.float16)}

     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-        ]
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {"factor": 2.0}

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
+                    dtype='float32')
                 out = paddle.fluid.layers.pow(x, **self.attrs)

                 fetch_list = [out.name]

-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
+            if exec_mode == ExecutionMode.CPU_FP32:
                 place = paddle.CPUPlace()
+            else:
+                place = paddle.IPUPlace()
+
             exe = paddle.static.Executor(place)
             exe.run(startup_prog)

-            if run_ipu:
+            if exec_mode != ExecutionMode.CPU_FP32:
                 feed_list = self.feed_list
                 ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IPUCompiledProgram(
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+                if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                    ipu_strategy.set_precision_config(enable_fp16=True)
+                program = paddle.static.IpuCompiledProgram(
                     main_prog,
                     ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
             else:
                 program = main_prog

-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            feed = self.feed_fp32
+            if exec_mode > ExecutionMode.IPU_FP32:
+                feed = self.feed_fp16
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]

     def test_base(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-
-        self.assertTrue(res0.shape == res1.shape)
+        output_dict = {}
+        for mode in ExecutionMode:
+            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+                break
+            output_dict[mode] = self._test_base(mode).flatten()
+
+        self.check(output_dict)

 class TestCase1(TestBase):
-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 3, 2, 2]).astype('float32'),
-            "y": np.array([2.0]).astype('float32'),
-        }
+    def set_data_feed(self):
+        data1 = np.random.uniform(size=[1, 3, 2, 2])
+        data2 = np.array([2.0])
+
+        self.feed_fp32 = {
+            "x": data1.astype(np.float32),
+            "y": data2.astype(np.float32)
+        }
+        self.feed_fp16 = {
+            "x": data1.astype(np.float16),
+            "y": data2.astype(np.float16)
+        }

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {}

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
+                    dtype='float32')
                 factor = paddle.static.data(
                     name=self.feed_list[1],
                     shape=self.feed_shape[1],
-                    dtype=self.feed_dtype[1])
+                    dtype='float32')
                 out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs)

                 fetch_list = [out.name]

-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
+            if exec_mode == ExecutionMode.CPU_FP32:
                 place = paddle.CPUPlace()
+            else:
+                place = paddle.IPUPlace()
+
             exe = paddle.static.Executor(place)
             exe.run(startup_prog)

-            if run_ipu:
+            if exec_mode != ExecutionMode.CPU_FP32:
                 feed_list = self.feed_list
                 ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IPUCompiledProgram(
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+                if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                    ipu_strategy.set_precision_config(enable_fp16=True)
+                program = paddle.static.IpuCompiledProgram(
                     main_prog,
                     ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
             else:
                 program = main_prog

-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            feed = self.feed_fp32
+            if exec_mode > ExecutionMode.IPU_FP32:
+                feed = self.feed_fp16
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]
...
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_data_feed(self):
self.feed = {
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()]
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, run_ipu=True):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
out = paddle.fluid.layers.conv2d(
x, num_filters=3, filter_size=3)
out = paddle.fluid.layers.Print(out, **self.attrs)
if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
fetch_list = [loss.name]
else:
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=self.feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
def test(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
class TestCase1(TestBase):
def set_op_attrs(self):
self.attrs = {"message": "input_data"}
class TestTrainCase1(TestBase):
def set_op_attrs(self):
# "forward" : print forward
# "backward" : print forward and backward
# "both": print forward and backward
self.attrs = {"message": "input_data2", "print_phase": "both"}
def set_training(self):
self.is_training = True
self.epoch = 2
@unittest.skip("attrs are not supported")
class TestCase2(TestBase):
def set_op_attrs(self):
self.attrs = {
"first_n": 10,
"summarize": 10,
"print_tensor_name": True,
"print_tensor_type": True,
"print_tensor_shape": True,
"print_tensor_layout": True,
"print_tensor_lod": True
}
if __name__ == "__main__":
unittest.main()
@@ -16,14 +16,8 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode

 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,125 +26,137 @@ class TestMean(IPUOpTest):
     def setUp(self):
         self.set_atol()
         self.set_training()
-        self.init_op()
+        self.set_test_op()
+
+    @property
+    def fp16_enabled(self):
+        return True

-    def init_op(self):
+    def set_test_op(self):
         self.op = paddle.fluid.layers.reduce_mean

     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-        ]
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
                     dtype='float32')
                 out = self.op(x, **self.attrs)

                 fetch_list = [out.name]

-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
+            if exec_mode == ExecutionMode.CPU_FP32:
                 place = paddle.CPUPlace()
+            else:
+                place = paddle.IPUPlace()
+
             exe = paddle.static.Executor(place)
             exe.run(startup_prog)

-            if run_ipu:
+            if exec_mode != ExecutionMode.CPU_FP32:
                 feed_list = self.feed_list
                 ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IPUCompiledProgram(
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+                if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                    ipu_strategy.set_precision_config(enable_fp16=True)
+                program = paddle.static.IpuCompiledProgram(
                     main_prog,
                     ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
             else:
                 program = main_prog

-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            feed = self.feed_fp32
+            if exec_mode > ExecutionMode.IPU_FP32:
+                feed = self.feed_fp16
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]

     def run_test_base(self):
-        res0 = self._test_base(True)
-        res1 = self._test_base(False)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
+        output_dict = {}
+        for mode in ExecutionMode:
+            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+                break
+            output_dict[mode] = self._test_base(mode).flatten()
+
+        self.check(output_dict)

-    def set_feed0(self):
-        self.feed = {}
-        self.feed["in_0"] = np.random.uniform(size=[2, 4]).astype(np.float32)
+    def set_data_feed0(self):
+        data = np.random.uniform(size=[2, 4])
+        self.feed_fp32 = {"in_0": data.astype(np.float32)}
+        self.feed_fp16 = {"in_0": data.astype(np.float16)}
         self.set_feed_attr()

-    def set_feed1(self):
-        self.feed = {}
-        self.feed["in_0"] = np.random.uniform(size=[2, 2, 2]).astype(np.float32)
+    def set_data_feed1(self):
+        data = np.random.uniform(size=[2, 2, 2])
+        self.feed_fp32 = {"in_0": data.astype(np.float32)}
+        self.feed_fp16 = {"in_0": data.astype(np.float16)}
         self.set_feed_attr()

-    def set_attr0(self):
+    def set_op_attr0(self):
         self.attrs = {}
         self.attrs['dim'] = None
         self.attrs['keep_dim'] = False

     def test_case0(self):
-        self.set_feed0()
-        self.set_attr0()
+        self.set_data_feed0()
+        self.set_op_attr0()
         self.run_test_base()

     def test_case1(self):
-        self.set_feed0()
-        self.set_attr0()
+        self.set_data_feed0()
+        self.set_op_attr0()
         self.attrs['dim'] = 0
         self.run_test_base()

     def test_case2(self):
-        self.set_feed0()
-        self.set_attr0()
+        self.set_data_feed0()
+        self.set_op_attr0()
         self.attrs['dim'] = -1
         self.run_test_base()

     def test_case3(self):
-        self.set_feed0()
-        self.set_attr0()
+        self.set_data_feed0()
+        self.set_op_attr0()
         self.attrs['dim'] = 1
         self.run_test_base()

     def test_case4(self):
-        self.set_feed0()
+        self.set_data_feed0()
         self.attrs = {}
         self.attrs['dim'] = 1
         self.attrs['keep_dim'] = True
         self.run_test_base()

     def test_case5(self):
-        self.set_feed1()
+        self.set_data_feed1()
         self.attrs = {}
         self.attrs['dim'] = [1, 2]
         self.attrs['keep_dim'] = False
         self.run_test_base()

     def test_case6(self):
-        self.set_feed1()
+        self.set_data_feed1()
         self.attrs = {}
         self.attrs['dim'] = [0, 1]
         self.attrs['keep_dim'] = False
         self.run_test_base()

     def test_case7(self):
-        self.set_feed1()
+        self.set_data_feed1()
         self.attrs = {}
         self.attrs['dim'] = [0, 1]
         self.attrs['keep_dim'] = True
@@ -158,22 +164,22 @@ class TestMean(IPUOpTest):

 class TestMax(TestMean):
-    def init_op(self):
+    def set_test_op(self):
         self.op = paddle.fluid.layers.reduce_max

 class TestMin(TestMean):
-    def init_op(self):
+    def set_test_op(self):
         self.op = paddle.fluid.layers.reduce_min

 class TestProd(TestMean):
-    def init_op(self):
+    def set_test_op(self):
         self.op = paddle.fluid.layers.reduce_prod

 class TestSum(TestMean):
-    def init_op(self):
+    def set_test_op(self):
         self.op = paddle.fluid.layers.reduce_sum
...
@@ -16,14 +16,8 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-                                                          np_dtype_to_fluid_str)
-
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode

 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,76 +26,84 @@ class TestBase(IPUOpTest):
     def setUp(self):
         self.set_atol()
         self.set_training()
-        self.set_feed()
+        self.set_data_feed()
         self.set_feed_attr()
-        self.set_attrs()
+        self.set_op_attrs()

-    def set_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
-        }
+    @property
+    def fp16_enabled(self):
+        return True
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[1, 3, 10, 10])
+        self.feed_fp32 = {"x": data.astype(np.float32)}
+        self.feed_fp16 = {"x": data.astype(np.float16)}

     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [
-            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-        ]
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

-    def set_attrs(self):
+    def set_op_attrs(self):
         self.attrs = {
             "shape": [30, 10],
             "inplace": True,
         }

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
+    def _test_base(self, exec_mode):
+        scope = paddle.static.Scope()
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
-        SEED = self.SEED
-        main_prog.random_seed = SEED
-        startup_prog.random_seed = SEED
+        main_prog.random_seed = self.SEED
+        startup_prog.random_seed = self.SEED

-        with fluid.scope_guard(scope):
+        with paddle.static.scope_guard(scope):
             with paddle.static.program_guard(main_prog, startup_prog):
                 x = paddle.static.data(
                     name=self.feed_list[0],
                     shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
+                    dtype='float32')
                 add = paddle.fluid.layers.elementwise_add(x, x)
                 out = paddle.fluid.layers.reshape(add, **self.attrs)

                 fetch_list = [out.name]

-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
+            if exec_mode == ExecutionMode.CPU_FP32:
                 place = paddle.CPUPlace()
+            else:
+                place = paddle.IPUPlace()
+
             exe = paddle.static.Executor(place)
             exe.run(startup_prog)

-            if run_ipu:
+            if exec_mode != ExecutionMode.CPU_FP32:
                 feed_list = self.feed_list
                 ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.SetGraphConfig(is_training=self.is_training)
-                program = compiler.IPUCompiledProgram(
+                ipu_strategy.set_graph_config(is_training=self.is_training)
+                if exec_mode == ExecutionMode.IPU_POPART_FP16:
+                    ipu_strategy.set_precision_config(enable_fp16=True)
+                program = paddle.static.IpuCompiledProgram(
                     main_prog,
                     ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
             else:
                 program = main_prog

-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+            feed = self.feed_fp32
+            if exec_mode > ExecutionMode.IPU_FP32:
+                feed = self.feed_fp16
+
+            result = exe.run(program, feed=feed, fetch_list=fetch_list)
             return result[0]

     def test_base(self):
-        res0 = self._test_base(True)
-        res1 = self._test_base(False)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-
-        self.assertTrue(res0.shape == res1.shape)
+        output_dict = {}
+        for mode in ExecutionMode:
+            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+                break
+            output_dict[mode] = self._test_base(mode)
+
+        self.check(output_dict, check_shape=True)

 class TestCase1(TestBase):
...
...@@ -16,13 +16,8 @@ import unittest ...@@ -16,13 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -31,82 +26,92 @@ class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        return True

    def set_data_feed(self):
        data = np.random.uniform(size=[2, 4, 6])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['shape'] = [6, 8]
        self.attrs['inplace'] = False

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                out = paddle.fluid.layers.reshape(x=x, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode)

        self.check(output_dict, check_shape=True)
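
# The loop in test_base above compares execution modes with ">", so the
# ExecutionMode imported from op_test_ipu must behave like an ordered enum.
# Below is a minimal sketch of the assumed layout; the numeric values are
# illustrative assumptions, and only the relative ordering
# CPU_FP32 < IPU_FP32 < IPU_POPART_FP16 is relied on by these tests.
from enum import IntEnum


class ExecutionModeSketch(IntEnum):
    CPU_FP32 = 1
    IPU_FP32 = 2
    IPU_POPART_FP16 = 3


# "mode > ExecutionMode.IPU_FP32" therefore selects the half-precision modes
# (only IPU_POPART_FP16 in this sketch), which is why _test_base swaps in
# feed_fp16 for those runs.
assert ExecutionModeSketch.IPU_POPART_FP16 > ExecutionModeSketch.IPU_FP32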

class TestCase1(TestBase):
    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['shape'] = [2, 3, -1, 2]
        self.attrs['inplace'] = False


class TestCase2(TestBase):
    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['shape'] = [-1, 0, 3, 2]
        self.attrs['inplace'] = False
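
# For reference on the "shape" attribute used by these cases: in fluid's
# reshape, -1 marks one dimension to be inferred from the element count and 0
# copies the corresponding dimension from the input. The helper below is an
# illustrative sketch of those rules (not the operator's implementation); for
# the [2, 4, 6] feed used here, [-1, 0, 3, 2] resolves to [2, 4, 3, 2].
def resolve_reshape(input_shape, target_shape):
    # Copy dimensions marked 0 from the input, then infer the single -1 entry
    # so that the total element count is preserved.
    resolved = [
        input_shape[i] if d == 0 else d for i, d in enumerate(target_shape)
    ]
    total = 1
    for d in input_shape:
        total *= d
    if -1 in resolved:
        known = 1
        for d in resolved:
            if d != -1:
                known *= d
        resolved[resolved.index(-1)] = total // known
    return resolved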
...
...@@ -12,55 +12,52 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['steps'] = 100
        self.attrs['save_at_step'] = 20
        self.attrs['is_training'] = True
        self.attrs['opt_type'] = 'sgd'
        self.attrs['enable_fp16'] = False
        self.attrs['model_path'] = tempfile.TemporaryDirectory()

    def _test_base(self, save_otherwise_load):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED
        generator = paddle.fluid.unique_name.UniqueNameGenerator()

        with paddle.fluid.unique_name.guard(generator):
            with paddle.static.scope_guard(scope):
                with paddle.static.program_guard(main_prog, startup_prog):
                    x = paddle.static.data(
                        name=self.feed_list[0],
...@@ -91,12 +88,17 @@ class TestBase(IPUOpTest):
                exe.run(startup_prog)

                if not save_otherwise_load:
                    paddle.static.load(main_prog, self.attrs['model_path'].name)

                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(
                    is_training=self.attrs['is_training'])
                ipu_strategy.set_precision_config(
                    enable_fp16=self.attrs['enable_fp16'])
                ipu_strategy.set_options({
                    'save_per_n_step': self.attrs['save_at_step']
                })
                program = paddle.static.IpuCompiledProgram(
                    main_prog, ipu_strategy=ipu_strategy).compile(
                        self.feed_list, fetch_list)
...@@ -104,16 +106,17 @@ class TestBase(IPUOpTest):
                run_steps = self.attrs['steps'] if save_otherwise_load \
                    else self.attrs['steps'] - self.attrs['save_at_step']

                feed = self.feed_fp16 if self.attrs[
                    'enable_fp16'] else self.feed_fp32

                for i in range(run_steps):
                    tmp = exe.run(program, feed=feed, fetch_list=fetch_list)

                    # currently, we update opt state every sess.run,
                    # will optimize
                    if save_otherwise_load and \
                            i == self.attrs['save_at_step'] - 1:
                        paddle.static.save(main_prog,
                                           self.attrs['model_path'].name)

                    if save_otherwise_load and i >= self.attrs['save_at_step']:
                        result.append(tmp)
...@@ -129,25 +132,65 @@ class TestBase(IPUOpTest):
        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))
        self.attrs['model_path'].cleanup()


class TestAdam(TestBase):
    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['steps'] = 100
        self.attrs['save_at_step'] = 20
        self.attrs['is_training'] = True
        self.attrs['opt_type'] = 'adam'
        self.attrs['enable_fp16'] = False
        self.attrs['model_path'] = tempfile.TemporaryDirectory()


class TestLamb(TestBase):
    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['steps'] = 100
        self.attrs['save_at_step'] = 20
        self.attrs['is_training'] = True
        self.attrs['opt_type'] = 'lamb'
        self.attrs['enable_fp16'] = False
        self.attrs['model_path'] = tempfile.TemporaryDirectory()


@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
class TestSGDFP16(TestBase):
    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['steps'] = 100
        self.attrs['save_at_step'] = 20
        self.attrs['is_training'] = True
        self.attrs['opt_type'] = 'sgd'
        self.attrs['enable_fp16'] = True
        self.attrs['model_path'] = tempfile.TemporaryDirectory()


@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
class TestAdamFP16(TestBase):
    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['steps'] = 100
        self.attrs['save_at_step'] = 20
        self.attrs['is_training'] = True
        self.attrs['opt_type'] = 'adam'
        self.attrs['enable_fp16'] = True
        self.attrs['model_path'] = tempfile.TemporaryDirectory()


@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
class TestLambFP16(TestBase):
    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['steps'] = 100
        self.attrs['save_at_step'] = 20
        self.attrs['is_training'] = True
        self.attrs['opt_type'] = 'lamb'
        self.attrs['enable_fp16'] = True
        self.attrs['model_path'] = tempfile.TemporaryDirectory()
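
# The tests above stage checkpoints under the tempfile.TemporaryDirectory held
# in attrs['model_path'] and delete it with cleanup() once both runs have been
# compared. Below is a minimal sketch of that save/restore round trip, using
# only the tempfile and paddle.static imports already present in this file;
# checkpoint_roundtrip is an illustrative helper, not part of the tests.
def checkpoint_roundtrip(main_prog):
    ckpt_dir = tempfile.TemporaryDirectory()
    # .name is the temporary directory's path; the tests pass it to
    # paddle.static.save/load as the checkpoint prefix.
    paddle.static.save(main_prog, ckpt_dir.name)
    paddle.static.load(main_prog, ckpt_dir.name)
    # cleanup() removes the temporary directory, replacing the old
    # shutil.rmtree("model", True) call.
    ckpt_dir.cleanup()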

if __name__ == "__main__":
...
...@@ -16,14 +16,8 @@ import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -32,80 +26,88 @@ class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        return False

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {"x": data.astype(np.float32)}
        self.feed_fp16 = {"x": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())
        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

    def set_op_attrs(self):
        self.attrs = {
            "scale": 1.0,
            "bias": 0.0,
            "bias_after_scale": True,
        }

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                out = paddle.fluid.layers.scale(x, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)
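
# Reference semantics for the scale op exercised by TestBase and the cases
# below, written as a NumPy-style sketch of the documented formula
# (illustrative only, not the operator's kernel): out = scale * x + bias when
# bias_after_scale is True, otherwise out = scale * (x + bias).
def scale_reference(x, scale=1.0, bias=0.0, bias_after_scale=True):
    if bias_after_scale:
        return scale * x + bias
    return scale * (x + bias)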

class TestCase1(TestBase):
    def set_op_attrs(self):
        self.attrs = {
            "scale": 5.0,
            "bias": 0.0,
...@@ -114,7 +116,7 @@ class TestCase1(TestBase):
class TestCase2(TestBase):
    def set_op_attrs(self):
        self.attrs = {
            "scale": 1.0,
            "bias": 0.5,
...@@ -123,7 +125,16 @@ class TestCase2(TestBase):
class TestCase3(TestBase):
    def set_op_attrs(self):
        self.attrs = {
            "scale": 5.0,
            "bias": 0.7,
            "bias_after_scale": True,
        }


class TestCase4(TestBase):
    def set_op_attrs(self):
        self.attrs = {
            "scale": 1.0,
            "bias": 0.0,
...@@ -131,59 +142,66 @@ class TestCase3(TestBase):
        }


class TestCase5(TestBase):
    def set_data_feed(self):
        x = np.random.uniform(size=[3, 3, 10, 10])
        y = np.array([3.0])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}

    def set_op_attrs(self):
        self.attrs = {
            "bias": 0.0,
            "bias_after_scale": True,
        }

    def _test_base(self, exec_mode):
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype='float32')
                out = paddle.fluid.layers.scale(x, scale=y, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]
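
# TestCase5 feeds the scale factor through a second graph input ("y", shape
# [1]) instead of a Python float; with bias=0.0 and bias_after_scale=True the
# expected output reduces to y[0] * x. A tiny self-contained sketch of that
# expectation (illustrative only, not part of the test):
def expected_tensor_scale(x, y):
    # y is a one-element array holding the runtime scale factor.
    return y[0] * x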
...