未验证 提交 86effa0c 编写于 作者: A Allen Guo 提交者: GitHub

[IPU] update ipu unittests p3 (#40072)

* update ipu UTs part3

* rename uts

* sync api changes

* update uts for new api

* update use_ipumodel()

* split pr
上级 b5a8a0d9
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Checks paddle.matmul on IPU (FP32 and popart FP16) against the
    CPU FP32 result, using the comparison helpers from IPUOpTest."""

    def setUp(self):
        # Prepare tolerances, training flag, input data and op attributes
        # before each test method runs.
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        # Enables the IPU popart FP16 execution mode for this op.
        return True

    def set_data_feed(self):
        # The same random values are cast to both precisions so results
        # are comparable across execution modes.
        x = np.random.uniform(size=[2, 3])
        y = np.random.uniform(size=[3, 2])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}

    def set_feed_attr(self):
        # Shapes and names are derived from the fp32 feed; the fp16 feed
        # uses the same names and shapes by construction.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        # Keyword arguments forwarded verbatim to paddle.matmul.
        self.attrs = {"transpose_x": False, "transpose_y": False}

    def _test_base(self, exec_mode):
        """Build and run the matmul program under `exec_mode`.

        Returns the first fetched output (a numpy array).
        """
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        # Fixed seeds keep program construction deterministic across modes.
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype='float32')
                out = paddle.matmul(x, y, **self.attrs)

                fetch_list = [out.name]

            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if exec_mode != ExecutionMode.CPU_FP32:
                # Compile for the IPU; FP16 precision is only switched on
                # for the popart FP16 mode.
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                # Modes ordered above IPU_FP32 consume half-precision input.
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def test_base(self):
        # Run every execution mode and let IPUOpTest.check compare the
        # outputs against the CPU FP32 reference within the tolerances.
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten()

        self.check(output_dict)
class TestCase1(TestBase):
    """matmul with both operands transposed."""

    def set_op_attrs(self):
        self.attrs = dict(transpose_x=True, transpose_y=True)
class TestCase3(TestBase):
    """Batched matmul with rank-4 operands."""

    def set_data_feed(self):
        lhs = np.random.uniform(size=[5, 4, 2, 3])
        rhs = np.random.uniform(size=[5, 4, 3, 2])
        self.feed_fp32 = {"x": lhs.astype(np.float32),
                          "y": rhs.astype(np.float32)}
        self.feed_fp16 = {"x": lhs.astype(np.float16),
                          "y": rhs.astype(np.float16)}
class TestCase4(TestBase):
    """Batched matmul with rank-3 operands."""

    def set_data_feed(self):
        lhs = np.random.uniform(size=[4, 2, 3])
        rhs = np.random.uniform(size=[4, 3, 2])
        self.feed_fp32 = {"x": lhs.astype(np.float32),
                          "y": rhs.astype(np.float32)}
        self.feed_fp16 = {"x": lhs.astype(np.float16),
                          "y": rhs.astype(np.float16)}
class TestCase5(TestBase):
    """Broadcasting matmul: rank-3 lhs against rank-2 rhs."""

    def set_data_feed(self):
        lhs = np.random.uniform(size=[4, 2, 3])
        rhs = np.random.uniform(size=[3, 2])
        self.feed_fp32 = {"x": lhs.astype(np.float32),
                          "y": rhs.astype(np.float32)}
        self.feed_fp16 = {"x": lhs.astype(np.float16),
                          "y": rhs.astype(np.float16)}
class TestCase6(TestBase):
    """Vector-vector matmul (rank-1 operands)."""

    def set_data_feed(self):
        vec_a = np.random.uniform(size=[3])
        vec_b = np.random.uniform(size=[3])
        self.feed_fp32 = {"x": vec_a.astype(np.float32),
                          "y": vec_b.astype(np.float32)}
        self.feed_fp16 = {"x": vec_a.astype(np.float16),
                          "y": vec_b.astype(np.float16)}
@unittest.skip("not supported")
class TestCase6_2(TestCase6):
def set_data_feed(self):
x = np.random.uniform(size=[3])
y = np.random.uniform(size=[3])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
def set_op_attrs(self):
self.attrs = {"transpose_x": True, "transpose_y": True}
class TestCase7(TestBase):
    """matmul of column vector by row vector (outer-product shape)."""

    def set_data_feed(self):
        lhs = np.random.uniform(size=[3, 1])
        rhs = np.random.uniform(size=[1, 2])
        self.feed_fp32 = {"x": lhs.astype(np.float32),
                          "y": rhs.astype(np.float32)}
        self.feed_fp16 = {"x": lhs.astype(np.float16),
                          "y": rhs.astype(np.float16)}
@unittest.skip("dim > 4 is not supported")
class TestCase8(TestBase):
def set_data_feed(self):
self.feed = {
"x": np.random.uniform(size=[6, 5, 4, 2, 3]).astype('float32'),
"y": np.random.uniform(size=[6, 5, 4, 3, 2]).astype('float32'),
}
if __name__ == "__main__":
unittest.main()
......@@ -16,13 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -31,97 +26,79 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_attrs()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_feed(self):
self.feed_shape = []
self.feed_shape.append([1, 3, 10, 10])
@property
def fp16_enabled(self):
return True
self.feed = {}
self.feed["in_0"] = np.random.uniform(
size=self.feed_shape[0]).astype(np.float32)
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)}
self.feed_list = list(self.feed.keys())
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {}
self.attrs['axis'] = None
self.attrs['keepdim'] = False
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
out = paddle.mean(x, **self.attrs)
fetch_list = [out.name]
out = paddle.fluid.layers.mean(x)
if run_ipu:
place = paddle.IPUPlace()
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(True)
res1 = self._test_base(False)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
class TestCase1(TestBase):
def set_attrs(self):
self.attrs = {}
self.attrs['axis'] = 1
self.attrs['keepdim'] = False
class TestCase2(TestBase):
def set_attrs(self):
self.attrs = {}
self.attrs['axis'] = 2
self.attrs['keepdim'] = False
class TestCase3(TestBase):
def set_attrs(self):
self.attrs = {}
self.attrs['axis'] = 2
self.attrs['keepdim'] = True
class TestCase4(TestBase):
def set_attrs(self):
self.attrs = {}
self.attrs['axis'] = None
self.attrs['keepdim'] = True
self.check(output_dict)
if __name__ == "__main__":
......
......@@ -17,8 +17,7 @@ from __future__ import print_function
import numpy as np
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.static
paddle.enable_static()
SEED = 2021
......@@ -28,7 +27,7 @@ SEED = 2021
"core is not compiled with IPU")
class TestCastNet(unittest.TestCase):
def _test(self, run_ipu=True):
scope = fluid.core.Scope()
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
......@@ -37,14 +36,14 @@ class TestCastNet(unittest.TestCase):
np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data(
name='image', shape=[1, 3, 10, 10], dtype='float32')
with fluid.ipu_shard(ipu_index=0):
with paddle.static.ipu_shard_guard(index=0):
conv1 = paddle.static.nn.conv2d(
image, num_filters=3, filter_size=3, bias_attr=False)
with fluid.ipu_shard(ipu_index=1):
with paddle.static.ipu_shard_guard(index=1):
conv2 = paddle.static.nn.conv2d(
conv1, num_filters=3, filter_size=3, bias_attr=False)
loss = paddle.mean(conv2)
......@@ -60,9 +59,10 @@ class TestCastNet(unittest.TestCase):
feed_list = [image.name]
fetch_list = [loss.name]
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(
ipu_strategy.set_graph_config(
num_ipus=2, is_training=False, enable_manual_shard=True)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_pipelining_config(enable_pipelining=False)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
......
......@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
np_dtype_to_fluid_str)
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -32,90 +26,98 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
self.set_op_attrs()
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[2, 5]).astype('float32'),
"y": np.random.uniform(size=[5, 3]).astype('float32'),
}
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.random.uniform(size=[2, 5])
y = np.random.uniform(size=[5, 3])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"x_num_col_dims": 1,
"y_num_col_dims": 1,
}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
dtype='float32')
out = paddle.fluid.layers.mul(x, y, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
self.check(output_dict)
class TestCase1(TestBase):
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[1, 2, 5]).astype('float32'),
"y": np.random.uniform(size=[5, 3]).astype('float32'),
}
def set_data_feed(self):
x = np.random.uniform(size=[1, 2, 5])
y = np.random.uniform(size=[5, 3])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"x_num_col_dims": 2,
"y_num_col_dims": 1,
......@@ -123,13 +125,13 @@ class TestCase1(TestBase):
class TestCase2(TestBase):
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[3, 4, 2, 9]).astype('float32'),
"y": np.random.uniform(size=[3, 6, 1, 2, 3]).astype('float32'),
}
def set_data_feed(self):
x = np.random.uniform(size=[3, 4, 2, 9])
y = np.random.uniform(size=[3, 6, 1, 2, 3])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
'x_num_col_dims': 2,
'y_num_col_dims': 2,
......
......@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
np_dtype_to_fluid_str)
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -32,23 +26,25 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
self.set_op_attrs()
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
}
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
self.feed_fp16 = {'in_0': data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"pool_size": 3,
"pool_type": 'avg',
......@@ -60,53 +56,59 @@ class TestBase(IPUOpTest):
"data_format": 'NCHW',
}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
dtype='float32')
out = paddle.fluid.layers.pool2d(x, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(res0.shape == res1.shape)
self.check(output_dict)
class TestCase1(TestBase):
......
......@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
np_dtype_to_fluid_str)
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -32,23 +26,25 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
self.set_op_attrs()
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
}
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
self.feed_fp16 = {'in_0': data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"pool_size": 3,
"pool_type": 'max',
......@@ -60,120 +56,126 @@ class TestBase(IPUOpTest):
"data_format": 'NCHW',
}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
dtype='float32')
out = paddle.fluid.layers.pool2d(x, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(res0.shape == res1.shape)
self.check(output_dict)
class TestCase1(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['pool_size'] = 3
class TestCase1_2(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['pool_size'] = [3, 1]
class TestCase2(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['pool_stride'] = 2
class TestCase2_2(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['pool_stride'] = [2, 1]
class TestCase3(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['pool_padding'] = [1, 1]
class TestCase3_2(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['pool_padding'] = [1, 1, 2, 2]
@unittest.skip('auto_pad is not currently supported')
class TestCase3_3(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['pool_padding'] = 'VALID'
@unittest.skip('auto_pad is not currently supported')
class TestCase3_4(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['pool_padding'] = 'SAME'
class TestCase4(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['global_pooling'] = True
class TestCase5(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['ceil_mode'] = True
class TestCase6(TestBase):
def set_attrs(self):
super().set_attrs()
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['exclusive'] = False
......
......@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
np_dtype_to_fluid_str)
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -32,124 +26,146 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
self.set_op_attrs()
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[1, 3, 2, 2]).astype('float32'),
}
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 2, 2])
self.feed_fp32 = {"x": data.astype(np.float32)}
self.feed_fp16 = {"x": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {"factor": 2.0}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
dtype='float32')
out = paddle.fluid.layers.pow(x, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
self.check(output_dict)
class TestCase1(TestBase):
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[1, 3, 2, 2]).astype('float32'),
"y": np.array([2.0]).astype('float32'),
def set_data_feed(self):
data1 = np.random.uniform(size=[1, 3, 2, 2])
data2 = np.array([2.0])
self.feed_fp32 = {
"x": data1.astype(np.float32),
"y": data2.astype(np.float32)
}
self.feed_fp16 = {
"x": data1.astype(np.float16),
"y": data2.astype(np.float16)
}
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
dtype='float32')
factor = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
dtype='float32')
out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Checks paddle.fluid.layers.Print (after a conv2d) on IPU against
    the same program run on CPU, in both inference and training modes."""

    def setUp(self):
        # Prepare tolerances, training flag, input data and op attributes
        # before each test method runs.
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        # Single-precision feed only; this test does not exercise FP16.
        self.feed = {
            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [x.dtype for x in self.feed.values()]

    def set_op_attrs(self):
        # Keyword arguments forwarded verbatim to layers.Print.
        self.attrs = {}

    def _test_base(self, run_ipu=True):
        """Build and run the program on IPU (run_ipu=True) or CPU.

        Returns the fetched output; in training mode, a numpy array of
        per-epoch losses instead.
        """
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        # Fixed seeds keep the conv weights identical on both devices.
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype=self.feed_dtype[0])
                out = paddle.fluid.layers.conv2d(
                    x, num_filters=3, filter_size=3)
                # Print is the op under test; it passes its input through.
                out = paddle.fluid.layers.Print(out, **self.attrs)

                if self.is_training:
                    # Training mode: minimize the mean and fetch the loss.
                    loss = paddle.mean(out)
                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
                    adam.minimize(loss)
                    fetch_list = [loss.name]
                else:
                    fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            if self.is_training:
                # Collect the loss across epochs so the CPU/IPU training
                # trajectories can be compared point-wise.
                result = []
                for _ in range(self.epoch):
                    loss_res = exe.run(program,
                                       feed=self.feed,
                                       fetch_list=fetch_list)
                    result.append(loss_res[0])
                return np.array(result)
            else:
                result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
                return result[0]

    def test(self):
        # Run on CPU and IPU; outputs must agree in value and shape.
        res0 = self._test_base(False)
        res1 = self._test_base(True)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

        self.assertTrue(res0.shape == res1.shape)
class TestCase1(TestBase):
    """Print op with a custom ``message`` prefix attribute."""

    def set_op_attrs(self):
        # Prefix every printed line with this message.
        self.attrs = dict(message="input_data")
class TestTrainCase1(TestBase):
    """Training-mode Print test exercising the ``print_phase`` attribute."""

    def set_op_attrs(self):
        # print_phase controls which pass(es) produce output
        # (per the original notes):
        #   "forward"  -> print in the forward pass
        #   "backward" -> print in the forward and backward passes
        #   "both"     -> print in the forward and backward passes
        attrs = {}
        attrs["message"] = "input_data2"
        attrs["print_phase"] = "both"
        self.attrs = attrs

    def set_training(self):
        # Train for two steps so both forward and backward passes run.
        self.is_training = True
        self.epoch = 2
@unittest.skip("attrs are not supported")
class TestCase2(TestBase):
    """Print op with the full attribute set (currently unsupported on IPU)."""

    def set_op_attrs(self):
        attrs = {
            "first_n": 10,
            "summarize": 10,
        }
        # Enable every per-tensor verbosity flag of the Print op.
        for flag in ("name", "type", "shape", "layout", "lod"):
            attrs["print_tensor_" + flag] = True
        self.attrs = attrs
# Allow running this test file directly: `python <this_file>.py`.
if __name__ == "__main__":
    unittest.main()
......@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
np_dtype_to_fluid_str)
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -32,125 +26,137 @@ class TestMean(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.init_op()
self.set_test_op()
@property
def fp16_enabled(self):
return True
def init_op(self):
def set_test_op(self):
self.op = paddle.fluid.layers.reduce_mean
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
out = self.op(x, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def run_test_base(self):
res0 = self._test_base(True)
res1 = self._test_base(False)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
def set_feed0(self):
self.feed = {}
self.feed["in_0"] = np.random.uniform(size=[2, 4]).astype(np.float32)
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
def set_data_feed0(self):
data = np.random.uniform(size=[2, 4])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)}
self.set_feed_attr()
def set_feed1(self):
self.feed = {}
self.feed["in_0"] = np.random.uniform(size=[2, 2, 2]).astype(np.float32)
def set_data_feed1(self):
data = np.random.uniform(size=[2, 2, 2])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)}
self.set_feed_attr()
def set_attr0(self):
def set_op_attr0(self):
self.attrs = {}
self.attrs['dim'] = None
self.attrs['keep_dim'] = False
def test_case0(self):
self.set_feed0()
self.set_attr0()
self.set_data_feed0()
self.set_op_attr0()
self.run_test_base()
def test_case1(self):
self.set_feed0()
self.set_attr0()
self.set_data_feed0()
self.set_op_attr0()
self.attrs['dim'] = 0
self.run_test_base()
def test_case2(self):
self.set_feed0()
self.set_attr0()
self.set_data_feed0()
self.set_op_attr0()
self.attrs['dim'] = -1
self.run_test_base()
def test_case3(self):
self.set_feed0()
self.set_attr0()
self.set_data_feed0()
self.set_op_attr0()
self.attrs['dim'] = 1
self.run_test_base()
def test_case4(self):
self.set_feed0()
self.set_data_feed0()
self.attrs = {}
self.attrs['dim'] = 1
self.attrs['keep_dim'] = True
self.run_test_base()
def test_case5(self):
self.set_feed1()
self.set_data_feed1()
self.attrs = {}
self.attrs['dim'] = [1, 2]
self.attrs['keep_dim'] = False
self.run_test_base()
def test_case6(self):
self.set_feed1()
self.set_data_feed1()
self.attrs = {}
self.attrs['dim'] = [0, 1]
self.attrs['keep_dim'] = False
self.run_test_base()
def test_case7(self):
self.set_feed1()
self.set_data_feed1()
self.attrs = {}
self.attrs['dim'] = [0, 1]
self.attrs['keep_dim'] = True
......@@ -158,22 +164,22 @@ class TestMean(IPUOpTest):
class TestMax(TestMean):
def init_op(self):
def set_test_op(self):
self.op = paddle.fluid.layers.reduce_max
class TestMin(TestMean):
def init_op(self):
def set_test_op(self):
self.op = paddle.fluid.layers.reduce_min
class TestProd(TestMean):
def init_op(self):
def set_test_op(self):
self.op = paddle.fluid.layers.reduce_prod
class TestSum(TestMean):
def init_op(self):
def set_test_op(self):
self.op = paddle.fluid.layers.reduce_sum
......
......@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
np_dtype_to_fluid_str)
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -32,76 +26,84 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
self.set_op_attrs()
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
}
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"x": data.astype(np.float32)}
self.feed_fp16 = {"x": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"shape": [30, 10],
"inplace": True,
}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
dtype='float32')
add = paddle.fluid.layers.elementwise_add(x, x)
out = paddle.fluid.layers.reshape(add, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(True)
res1 = self._test_base(False)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.assertTrue(res0.shape == res1.shape)
self.check(output_dict, check_shape=True)
class TestCase1(TestBase):
......
......@@ -16,13 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -31,82 +26,92 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_attrs()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_feed(self):
self.feed_shape = []
self.feed_shape.append([2, 4, 6])
@property
def fp16_enabled(self):
return True
self.feed = {}
self.feed["in_0"] = np.random.uniform(
size=self.feed_shape[0]).astype(np.float32)
def set_data_feed(self):
data = np.random.uniform(size=[2, 4, 6])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)}
self.feed_list = list(self.feed.keys())
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {}
self.attrs['shape'] = [6, 8]
self.attrs['inplace'] = False
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
out = paddle.fluid.layers.reshape(x=x, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(True)
res1 = self._test_base(False)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.assertTrue(res0.shape == res1.shape)
self.check(output_dict, check_shape=True)
class TestCase1(TestBase):
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {}
self.attrs['shape'] = [2, 3, -1, 2]
self.attrs['inplace'] = False
class TestCase2(TestBase):
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {}
self.attrs['shape'] = [-1, 0, 3, 2]
self.attrs['inplace'] = False
......
......@@ -12,55 +12,52 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import shutil
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_feed()
self.set_attrs()
def set_feed(self):
self.feed_shape = []
self.feed_shape.append([1, 3, 10, 10])
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
self.feed = {}
self.feed["in_0"] = np.random.uniform(
size=self.feed_shape[0]).astype(np.float32)
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)}
self.feed_list = list(self.feed.keys())
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['is_training'] = True
self.attrs['opt_type'] = 'sgd'
self.attrs['enable_fp16'] = False
self.attrs['model_path'] = tempfile.TemporaryDirectory()
def _test_base(self, save_otherwise_load):
scope = fluid.core.Scope()
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
generator = fluid.unique_name.UniqueNameGenerator()
generator = paddle.fluid.unique_name.UniqueNameGenerator()
with fluid.unique_name.guard(generator):
with fluid.scope_guard(scope):
with paddle.fluid.unique_name.guard(generator):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
......@@ -91,12 +88,17 @@ class TestBase(IPUOpTest):
exe.run(startup_prog)
if not save_otherwise_load:
paddle.static.load(main_prog, "model/model")
paddle.static.load(main_prog, self.attrs['model_path'].name)
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(
ipu_strategy.set_graph_config(
is_training=self.attrs['is_training'])
program = compiler.IPUCompiledProgram(
ipu_strategy.set_precision_config(
enable_fp16=self.attrs['enable_fp16'])
ipu_strategy.set_options({
'save_per_n_step': self.attrs['save_at_step']
})
program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy).compile(
self.feed_list, fetch_list)
......@@ -104,16 +106,17 @@ class TestBase(IPUOpTest):
run_steps = self.attrs['steps'] if save_otherwise_load \
else self.attrs['steps'] - self.attrs['save_at_step']
feed = self.feed_fp16 if self.attrs[
'enable_fp16'] else self.feed_fp32
for i in range(run_steps):
tmp = exe.run(program,
feed=self.feed,
fetch_list=fetch_list)
tmp = exe.run(program, feed=feed, fetch_list=fetch_list)
# currently, we update opt state every sess.run,
# will optimize
if save_otherwise_load and \
i == self.attrs['save_at_step'] - 1:
paddle.static.save(main_prog, "model/model")
paddle.static.save(main_prog,
self.attrs['model_path'].name)
if save_otherwise_load and i >= self.attrs['save_at_step']:
result.append(tmp)
......@@ -129,25 +132,65 @@ class TestBase(IPUOpTest):
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
shutil.rmtree("model", True)
self.attrs['model_path'].cleanup()
class TestAdam(TestBase):
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['is_training'] = True
self.attrs['opt_type'] = 'adam'
self.attrs['enable_fp16'] = False
self.attrs['model_path'] = tempfile.TemporaryDirectory()
class TestLamb(TestBase):
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['is_training'] = True
self.attrs['opt_type'] = 'lamb'
self.attrs['enable_fp16'] = False
self.attrs['model_path'] = tempfile.TemporaryDirectory()
@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
class TestSGDFP16(TestBase):
def set_op_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['is_training'] = True
self.attrs['opt_type'] = 'sgd'
self.attrs['enable_fp16'] = True
self.attrs['model_path'] = tempfile.TemporaryDirectory()
@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
class TestAdamFP16(TestBase):
def set_op_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['is_training'] = True
self.attrs['opt_type'] = 'adam'
self.attrs['enable_fp16'] = True
self.attrs['model_path'] = tempfile.TemporaryDirectory()
@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
class TestLambFP16(TestBase):
def set_op_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['is_training'] = True
self.attrs['opt_type'] = 'lamb'
self.attrs['enable_fp16'] = True
self.attrs['model_path'] = tempfile.TemporaryDirectory()
if __name__ == "__main__":
......
......@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
np_dtype_to_fluid_str)
paddle.enable_static()
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -32,80 +26,88 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
self.set_op_attrs()
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
}
@property
def fp16_enabled(self):
return False
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"x": data.astype(np.float32)}
self.feed_fp16 = {"x": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [
np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"scale": 1.0,
"bias": 0.0,
"bias_after_scale": True,
}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
dtype='float32')
out = paddle.fluid.layers.scale(x, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.assertTrue(res0.shape == res1.shape)
self.check(output_dict)
class TestCase1(TestBase):
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"scale": 5.0,
"bias": 0.0,
......@@ -114,7 +116,7 @@ class TestCase1(TestBase):
class TestCase2(TestBase):
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"scale": 1.0,
"bias": 0.5,
......@@ -123,7 +125,16 @@ class TestCase2(TestBase):
class TestCase3(TestBase):
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"scale": 5.0,
"bias": 0.7,
"bias_after_scale": True,
}
class TestCase4(TestBase):
def set_op_attrs(self):
self.attrs = {
"scale": 1.0,
"bias": 0.0,
......@@ -131,59 +142,66 @@ class TestCase3(TestBase):
}
class TestCase4(TestBase):
def set_feed(self):
self.feed = {
"x": np.random.uniform(size=[3, 3, 10, 10]).astype('float32'),
"y": np.array([3.0]).astype('float32'),
}
class TestCase5(TestBase):
def set_data_feed(self):
x = np.random.uniform(size=[3, 3, 10, 10])
y = np.array([3.0])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
def set_attrs(self):
def set_op_attrs(self):
self.attrs = {
"bias": 0.0,
"bias_after_scale": True,
}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = self.SEED
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
dtype='float32')
out = paddle.fluid.layers.scale(x, scale=y, **self.attrs)
fetch_list = [out.name]
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.SetGraphConfig(is_training=self.is_training)
program = compiler.IPUCompiledProgram(
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册