Unverified commit a279a4f8 authored by Allen Guo, committed by GitHub

[IPU] update ipu unittests p2 (#40069)

* update ipu UTs part2

* clean git

* rename ut

* rename ut 1

* sync api changes

* update uts for new api

* update uts for new api

* fix re-define
Parent 13f2b1e3
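For orientation, these are the API renames this commit migrates the tests to, collected from the diffs below (a summary sketch; `set_pipelining_config` is inferred from the old `SetPipeliningConfig` and may differ):

# old (fluid-era) API                             new API
# --------------------------------------------------------------------------
# paddle.fluid.ipu_shard(ipu_index=i)        ->  paddle.static.ipu_shard_guard(index=i)
# paddle.fluid.ipu_shard(ipu_stage=s)        ->  paddle.static.ipu_shard_guard(stage=s)
# ipu_strategy.SetGraphConfig(...)           ->  ipu_strategy.set_graph_config(...)
# ipu_strategy.SetPipeliningConfig(...)      ->  ipu_strategy.set_pipelining_config(...)  # inferred
# (new in this PR) fp16 runs                 ->  ipu_strategy.set_precision_config(enable_fp16=True)
# paddle.fluid.compiler.IPUCompiledProgram   ->  paddle.static.IpuCompiledProgram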
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
sys.path.append("..")
import paddle
import paddle.fluid as fluid
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestIpuPlace(unittest.TestCase):
def test_ipu_place(self):
num_devices = fluid.core.get_ipu_device_count()
self.assertGreater(num_devices, 0)
for i in range(num_devices):
place = paddle.IPUPlace()
p = fluid.core.Place()
p.set_place(place)
self.assertTrue(p.is_ipu_place())
def test_ipu_set_device(self):
num_devices = fluid.core.get_ipu_device_count()
self.assertGreater(num_devices, 0)
for i in range(num_devices):
paddle.set_device('ipu')
device = paddle.get_device()
self.assertTrue(device == "ipus:{{0-{}}}".format(num_devices - 1))
if __name__ == '__main__':
unittest.main()
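A note on test_ipu_set_device above: the doubled braces in the format string are str.format escapes for literal braces, so the expected device string is a brace-delimited range covering all visible IPUs:

# With e.g. num_devices = 4:
#   "ipus:{{0-{}}}".format(num_devices - 1)  ->  "ipus:{0-3}"
# i.e. paddle.get_device() reports the whole visible IPU range.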
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
import paddle
import paddle.fluid as fluid
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestIpuShard(unittest.TestCase):
def _test(self):
# build graph
a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
b = a + 2 # scale : scale * x + bias, ipu_index : no
with paddle.fluid.ipu_shard(ipu_index=1):
c = b + 1 # scale, ipu_index : 1
with paddle.fluid.ipu_shard(ipu_index=2):
d = c * 2 # scale, ipu_index : 2
with paddle.fluid.ipu_shard(ipu_index=3):
e = d + 3 # scale, ipu_index : 3
with paddle.fluid.ipu_shard(ipu_index=1):
e = e + 3 # scale, ipu_index : 1
with paddle.fluid.ipu_shard(ipu_index=2):
e = e + 3 # scale, ipu_index : 2
with paddle.fluid.ipu_shard(ipu_index=1):
f = paddle.tensor.pow(e, 2.0) # pow, ipu_index : 1
with paddle.fluid.ipu_shard(ipu_index=2):
g = f - 1 # scale, ipu_index : 2
h = g + 1 # scale, ipu_index : no
ipu_index_list = []
main_prog = paddle.static.default_main_program()
for op in main_prog.global_block().ops:
if op.desc.has_attr("ipu_index"):
ipu_index_list.append(op.desc.attr("ipu_index"))
return ipu_index_list
def test_ipu_shard(self):
ipu_index_list = self._test()
expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
self.assertTrue(
np.allclose(
ipu_index_list, expected_ipu_index_list, atol=0))
if __name__ == "__main__":
unittest.main()
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,9 +16,7 @@ from __future__ import print_function
import numpy as np
import unittest
import sys
import paddle
import paddle.fluid as fluid
paddle.enable_static()
@@ -26,26 +24,69 @@ paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestIpuShard(unittest.TestCase):
def _test(self):
# build graph
a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
b = a + 2 # scale : scale * x + bias, ipu_index : no
with paddle.static.ipu_shard_guard(index=1):
c = b + 1 # scale, ipu_index : 1
with paddle.static.ipu_shard_guard(index=2):
d = c * 2 # scale, ipu_index : 2
with paddle.static.ipu_shard_guard(index=3):
e = d + 3 # scale, ipu_index : 3
with paddle.static.ipu_shard_guard(index=1):
e = e + 3 # scale, ipu_index : 1
with paddle.static.ipu_shard_guard(index=2):
e = e + 3 # scale, ipu_index : 2
with paddle.static.ipu_shard_guard(index=1):
f = paddle.tensor.pow(e, 2.0) # pow, ipu_index : 1
with paddle.static.ipu_shard_guard(index=2):
g = f - 1 # scale, ipu_index : 2
h = g + 1 # scale, ipu_index : no
ipu_index_list = []
main_prog = paddle.static.default_main_program()
for op in main_prog.global_block().ops:
if op.desc.has_attr("ipu_index"):
ipu_index_list.append(op.desc.attr("ipu_index"))
return ipu_index_list
def test_ipu_shard(self):
ipu_index_list = self._test()
expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
self.assertTrue(
np.allclose(
ipu_index_list, expected_ipu_index_list, atol=0))
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestIpuPipeline(unittest.TestCase):
def _test(self):
# build graph
a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
b = a + 2 # scale : scale * x + bias, ipu_stage : no
-with paddle.fluid.ipu_shard(ipu_stage=1):
+with paddle.static.ipu_shard_guard(stage=1):
c = b + 1 # scale, ipu_stage : 1
-with paddle.fluid.ipu_shard(ipu_stage=2):
+with paddle.static.ipu_shard_guard(stage=2):
d = c * 2 # scale, ipu_stage : 2
-with paddle.fluid.ipu_shard(ipu_stage=3):
+with paddle.static.ipu_shard_guard(stage=3):
e = d + 3 # scale, ipu_stage : 3
-with paddle.fluid.ipu_shard(ipu_stage=1):
+with paddle.static.ipu_shard_guard(stage=1):
e = e + 3 # scale, ipu_stage : 1
-with paddle.fluid.ipu_shard(ipu_stage=2):
+with paddle.static.ipu_shard_guard(stage=2):
e = e + 3 # scale, ipu_stage : 2
-with paddle.fluid.ipu_shard(ipu_stage=1):
+with paddle.static.ipu_shard_guard(stage=1):
f = paddle.tensor.pow(e, 2.0) # pow, ipu_stage : 1
-with paddle.fluid.ipu_shard(ipu_stage=2):
+with paddle.static.ipu_shard_guard(stage=2):
g = f - 1 # scale, ipu_stage : 2
h = g + 1 # scale, ipu_stage : no
......
@@ -12,44 +12,60 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
+import paddle.static
paddle.enable_static()
-SEED = 2021
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
-class TestConvNet(unittest.TestCase):
-def test_training(self):
+class TestIpuStrategy(unittest.TestCase):
+def test_set_options(self):
ipu_strategy = paddle.static.IpuStrategy()
all_option_names = ipu_strategy._ipu_strategy.get_all_option_names()
for option_name in all_option_names:
option = ipu_strategy._ipu_strategy.get_option(option_name)
option_type = option['type']
option_value = option['value']
if option_type in ['double']:
set_value = option_value + 0.5
elif option_type == 'uint64':
set_value = option_value + 1
elif option_type == 'bool':
set_value = not option_value
else:
continue
ipu_strategy.set_options({option_name: set_value})
new_value = ipu_strategy.get_option(option_name)
assert new_value == set_value, f"set {option_name} to {set_value} failed"
-assert ipu_strategy.num_ipus == 1, "Default num_ipus must be 1"
-assert ipu_strategy.is_training == True, "Default is_training is True"
-assert ipu_strategy.enable_pipelining == False, \
-"Default enable_pipelining is False"
-assert ipu_strategy.enable_manual_shard == False, \
-"Default enable_manual_shard is False"
-ipu_strategy.SetGraphConfig(
-num_ipus=2, is_training=False, enable_manual_shard=True)
-ipu_strategy.SetPipeliningConfig(enable_pipelining=True)
-assert ipu_strategy.num_ipus == 2, "Set num_ipus Failed"
-assert ipu_strategy.is_training == False, "Set is_training Failed"
-assert ipu_strategy.enable_pipelining == True, \
-"Set enable_pipelining Failed"
def test_set_string_options(self):
ipu_strategy = paddle.static.IpuStrategy()
options = {
'cache_path': 'paddle_cache',
'log_dir': 'paddle_log',
-'partials_type_matmuls': 'half',
+'partials_type_matmuls': 'float',
}
ipu_strategy.set_options(options)
for k, v in options.items():
assert v == ipu_strategy.get_option(k), f"set {k} to {v} failed "
-assert ipu_strategy.enable_manual_shard == True, \
-"Set enable_manual_shard Failed"
def test_set_other_options(self):
ipu_strategy = paddle.static.IpuStrategy()
options = {}
options['dot_checks'] = ['0', '1', '2', '3']
options['engine_options'] = {
'debug.allowOutOfMemory': 'true',
'autoReport.directory': 'path',
'autoReport.all': 'true'
}
for k, v in options.items():
ipu_strategy.set_options({k: v})
assert v == ipu_strategy.get_option(k), f"set {k} to {v} failed "
if __name__ == "__main__":
......
@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-np_dtype_to_fluid_str)
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,44 +26,52 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
-self.set_feed()
+self.set_data_feed()
self.set_feed_attr()
-self.set_attrs()
+self.set_op_attrs()
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
-}
+@property
+def fp16_enabled(self):
+return True
+def set_atol(self):
+self.atol = 1e-6
+self.rtol = 1e-5
+self.atol_fp16 = 1e-2
+self.rtol_fp16 = 1e-3
+def set_data_feed(self):
+x = np.random.uniform(size=[1, 3, 10, 10])
+self.feed_fp32 = {"x": x.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16)}
def set_feed_attr(self):
-self.feed_shape = [x.shape for x in self.feed.values()]
-self.feed_list = list(self.feed.keys())
-self.feed_dtype = [
-np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-]
+self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+self.feed_list = list(self.feed_fp32.keys())
+self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 1,
"epsilon": 1e-05,
}
+self.optimizer = None
-def _test_base(self, run_ipu=True):
-scope = fluid.core.Scope()
+def _test_base(self, exec_mode):
+scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
-SEED = self.SEED
-main_prog.random_seed = SEED
-startup_prog.random_seed = SEED
+main_prog.random_seed = self.SEED
+startup_prog.random_seed = self.SEED
-with fluid.scope_guard(scope):
+with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
-dtype=self.feed_dtype[0])
+dtype='float32')
if self.is_training:
ch = self.feed_shape[0][1]
@@ -80,33 +82,38 @@ class TestBase(IPUOpTest):
out = paddle.fluid.layers.nn.layer_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs)
else:
# scale = True
# bias = True
scale = self.attrs['scale']
bias = self.attrs['shift']
out = paddle.fluid.layers.nn.layer_norm(
x, param_attr=scale, bias_attr=bias, **self.attrs)
if self.is_training:
loss = paddle.mean(out)
-adam = paddle.optimizer.Adam(learning_rate=1e-2)
-adam.minimize(loss)
fetch_list = [loss.name]
else:
fetch_list = [out.name]
-if run_ipu:
+if self.is_training:
+optimizer = None
+if self.optimizer == 'sgd':
+optimizer = paddle.optimizer.SGD(learning_rate=1e-2)
+elif self.optimizer == 'adam':
+optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
+elif self.optimizer == 'lamb':
+optimizer = paddle.optimizer.Lamb(
+learning_rate=1e-2, lamb_weight_decay=0.0)
+if optimizer is not None:
+optimizer.minimize(loss)
+if exec_mode:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
-if run_ipu:
+if exec_mode:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
-ipu_strategy.SetGraphConfig(is_training=self.is_training)
-program = compiler.IPUCompiledProgram(
+ipu_strategy.set_graph_config(is_training=self.is_training)
+program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
@@ -116,12 +123,14 @@ class TestBase(IPUOpTest):
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
-feed=self.feed,
+feed=self.feed_fp32,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
-result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+result = exe.run(program,
+feed=self.feed_fp32,
+fetch_list=fetch_list)
return result[0]
def test_base(self):
@@ -137,7 +146,7 @@ class TestBase(IPUOpTest):
@unittest.skip('raise error')
class TestCase1(TestBase):
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"scale": False,
"shift": True,
@@ -148,7 +157,7 @@ class TestCase1(TestBase):
@unittest.skip('raise error')
class TestCase2(TestBase):
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": False,
@@ -158,18 +167,28 @@ class TestCase2(TestBase):
class TestCase3(TestBase):
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 2,
"epsilon": 1e-05,
}
+self.optimizer = None
class TestTrainCase1(TestBase):
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 1,
"epsilon": 1e-05
}
+self.optimizer = 'sgd'
def set_atol(self):
-self.atol = 1e-3
+self.atol = 1e-6
def set_training(self):
self.is_training = True
@@ -178,15 +197,34 @@ class TestTrainCase1(TestBase):
class TestTrainCase2(TestBase):
def set_atol(self):
-self.atol = 1e-3
+self.atol = 5e-4
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 2,
"epsilon": 1e-05,
"epsilon": 1e-05
}
+self.optimizer = 'adam'
def set_training(self):
self.is_training = True
self.epoch = 10
+class TestTrainCase3(TestBase):
+def set_atol(self):
+self.atol = 5e-3
+def set_op_attrs(self):
+self.attrs = {
+"scale": True,
+"shift": True,
+"begin_norm_axis": 2,
+"epsilon": 1e-05
+}
+self.optimizer = 'lamb'
+def set_training(self):
+self.is_training = True
......
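The layer_norm diff above introduces the dual-feed pattern reused throughout this PR: one random draw, materialized at two precisions, so every execution mode sees the same underlying values. A minimal sketch of the idea, assuming the ExecutionMode enum from op_test_ipu (sketched further below):

import numpy as np

# fp32 feeds the CPU and IPU fp32 runs; fp16 feeds the popart fp16 run.
data = np.random.uniform(size=[1, 3, 10, 10])
feed_fp32 = {"x": data.astype(np.float32)}
feed_fp16 = {"x": data.astype(np.float16)}
# Inside _test_base the tests then select:
#   feed = feed_fp16 if exec_mode > ExecutionMode.IPU_FP32 else feed_fp32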
@@ -16,15 +16,9 @@ import unittest
import numpy as np
import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
-import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-np_dtype_to_fluid_str)
import paddle.nn.functional as F
-paddle.enable_static()
+import paddle.static
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -33,72 +27,81 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
-self.set_feed()
+self.set_data_feed()
self.set_feed_attr()
-self.set_attrs()
+self.set_op_attrs()
+@property
+def fp16_enabled(self):
+return True
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[1, 3, 10, 10]).astype('float32')
-}
+def set_data_feed(self):
+data = np.random.uniform(size=[1, 3, 10, 10])
+self.feed_fp32 = {'in_0': data.astype(np.float32)}
+self.feed_fp16 = {'in_0': data.astype(np.float16)}
+self.feed_list = list(self.feed_fp32.keys())
def set_feed_attr(self):
-self.feed_shape = [x.shape for x in self.feed.values()]
-self.feed_list = list(self.feed.keys())
-self.feed_dtype = [
-np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-]
+self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+self.feed_list = list(self.feed_fp32.keys())
+self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {"axis": -1}
-def _test_base(self, run_ipu=True):
-scope = fluid.core.Scope()
+def _test_base(self, exec_mode):
+scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
-SEED = self.SEED
-main_prog.random_seed = SEED
-startup_prog.random_seed = SEED
+main_prog.random_seed = self.SEED
+startup_prog.random_seed = self.SEED
-with fluid.scope_guard(scope):
+with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
-dtype=self.feed_dtype[0])
+dtype='float32')
out = F.log_softmax(x, **self.attrs)
fetch_list = [out.name]
-if run_ipu:
-place = paddle.IPUPlace()
-else:
+if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
+else:
+place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
-if run_ipu:
+if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
-ipu_strategy.SetGraphConfig(is_training=self.is_training)
-program = compiler.IPUCompiledProgram(
+ipu_strategy.set_graph_config(is_training=self.is_training)
+if exec_mode == ExecutionMode.IPU_POPART_FP16:
+ipu_strategy.set_precision_config(enable_fp16=True)
+program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
-result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
-return result[0]
+feed = self.feed_fp32
+if exec_mode > ExecutionMode.IPU_FP32:
+feed = self.feed_fp16
+result = exe.run(program, feed=feed, fetch_list=fetch_list)
+return result[0]
-def test_base(self):
-res0 = self._test_base(False)
-res1 = self._test_base(True)
-self.assertTrue(
-np.allclose(
-res0.flatten(), res1.flatten(), atol=self.atol))
-self.assertTrue(res0.shape == res1.shape)
+def test(self):
+output_dict = {}
+for mode in ExecutionMode:
+if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+break
+output_dict[mode] = self._test_base(mode).flatten()
+self.check(output_dict)
class TestCase1(TestBase):
......
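ExecutionMode comes from op_test_ipu, which is not part of this diff. A minimal sketch consistent with how the tests use it (iteration order and ordered comparisons like `mode > ExecutionMode.IPU_FP32`); the member names are taken from the tests, the integer values are an assumption:

from enum import IntEnum

class ExecutionMode(IntEnum):
    # Ordering matters: the tests iterate members in this order and
    # treat anything above IPU_FP32 as an fp16 run.
    CPU_FP32 = 1
    IPU_FP32 = 2
    IPU_POPART_FP16 = 3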
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[2, 20, 30528])
self.feed = {"in_0": data.astype('bool')}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()]
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype="bool")
out = paddle.fluid.layers.logical_not(x)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).astype(np.int32)
self.check(output_dict, check_shape=True)
if __name__ == "__main__":
unittest.main()
@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-np_dtype_to_fluid_str)
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,16 +26,25 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
-self.set_attrs()
+self.set_data_feed()
+self.set_feed_attr()
+self.set_op_attrs()
+@property
+def fp16_enabled(self):
+return True
+def set_data_feed(self):
+data = np.array([[[1], [3]], [[2], [4]], [[4], [127]]])
+self.feed_cpu = {"x": data.astype(np.int64)}
+self.feed_ipu = {"x": data.astype(np.int32)}
def set_feed_attr(self):
-self.feed_shape = [x.shape for x in self.feed.values()]
-self.feed_list = list(self.feed.keys())
-self.feed_dtype = [
-np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-]
+self.feed_shape = [x.shape for x in self.feed_cpu.values()]
+self.feed_list = list(self.feed_cpu.keys())
+self.feed_dtype = [x.dtype for x in self.feed_cpu.values()]
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"size": [128, 16],
"is_sparse": False,
@@ -50,33 +53,20 @@ class TestBase(IPUOpTest):
"dtype": 'float32'
}
-def _test_base(self, run_ipu=True):
-scope = fluid.core.Scope()
+def _test_base(self, exec_mode):
+scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
-SEED = self.SEED
-main_prog.random_seed = SEED
-startup_prog.random_seed = SEED
-if run_ipu:
-self.feed = {
-"x": np.array(
-[[[1], [3]], [[2], [4]], [[4], [127]]]).astype(np.int32)
-}
-else:
-self.feed = {
-"x": np.array(
-[[[1], [3]], [[2], [4]], [[4], [127]]]).astype(np.int64)
-}
-self.set_feed_attr()
+main_prog.random_seed = self.SEED
+startup_prog.random_seed = self.SEED
-with fluid.scope_guard(scope):
+with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
-dtype=self.feed_dtype[0])
+dtype='int64')
out = paddle.fluid.layers.embedding(x, **self.attrs)
if self.is_training:
@@ -87,47 +77,61 @@ class TestBase(IPUOpTest):
else:
fetch_list = [out.name]
-if run_ipu:
-place = paddle.IPUPlace()
-else:
+if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
+else:
+place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
-if run_ipu:
+if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
-ipu_strategy.SetGraphConfig(is_training=self.is_training)
-program = compiler.IPUCompiledProgram(
+ipu_strategy.set_graph_config(is_training=self.is_training)
+if exec_mode == ExecutionMode.IPU_POPART_FP16:
+ipu_strategy.set_precision_config(enable_fp16=True)
+program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
+feed = self.feed_cpu
+if exec_mode > ExecutionMode.CPU_FP32:
+feed = self.feed_ipu
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
-feed=self.feed,
+feed=feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
-result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
-def test_base(self):
-res0 = self._test_base(False)
-res1 = self._test_base(True)
-self.assertTrue(
-np.allclose(
-res0.flatten(), res1.flatten(), atol=self.atol))
-self.assertTrue(res0.shape == res1.shape)
+def test(self):
+output_dict = {}
+for mode in ExecutionMode:
+if mode > ExecutionMode.IPU_FP32 and (not self.fp16_enabled or
+self.is_training):
+break
+output_dict[mode] = self._test_base(mode).flatten()
+self.check(output_dict)
class TestTrainCase1(TestBase):
def set_atol(self):
self.atol = 1e-7
self.rtol = 1e-6
self.atol_fp16 = 1e-3
self.rtol_fp16 = 1e-3
def set_training(self):
self.is_training = True
self.epoch = 10
......
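Worth noting in the embedding tests: the same ids are kept at two integer widths, with int64 for the CPU reference run (what fluid's embedding expects) and int32 for the IPU run — presumably a backend restriction (an assumption; the diff does not state the reason):

import numpy as np

ids = np.array([[[1], [3]], [[2], [4]], [[4], [127]]])
feed_cpu = {"x": ids.astype(np.int64)}  # CPU reference run
feed_ipu = {"x": ids.astype(np.int32)}  # IPU run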
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.array([[[1], [3]], [[2], [4]], [[4], [127]]])
self.feed_cpu = {"x": x.astype(np.int64)}
self.feed_ipu = {"x": x.astype(np.int32)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_cpu.values()]
self.feed_list = list(self.feed_cpu.keys())
self.feed_dtype = [x.dtype for x in self.feed_cpu.values()]
def set_op_attrs(self):
self.attrs = {
"num_embeddings": 128,
"embedding_dim": 16,
"sparse": False,
"padding_idx": -1,
"weight_attr": None
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='int64')
embedding = paddle.nn.Embedding(**self.attrs)
out = embedding(x)
if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
fetch_list = [loss.name]
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_cpu
if exec_mode > ExecutionMode.CPU_FP32:
feed = self.feed_ipu
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and (not self.fp16_enabled or
self.is_training):
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestTrainCase1(TestBase):
def set_atol(self):
self.atol = 1e-7
self.rtol = 1e-6
self.atol_fp16 = 1e-3
self.rtol_fp16 = 1e-3
def set_training(self):
self.is_training = True
self.epoch = 10
if __name__ == "__main__":
unittest.main()
@@ -19,7 +19,7 @@ import unittest
import sys
import paddle
import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
+import paddle.static
from paddle.optimizer.lr import LRScheduler
paddle.enable_static()
@@ -71,8 +71,8 @@ class TestConvNet(unittest.TestCase):
feed_list = [image.name]
fetch_list = [loss.name]
ipu_strategy = paddle.static.IpuStrategy()
-ipu_strategy.SetGraphConfig(is_training=True)
-program = compiler.IPUCompiledProgram(
+ipu_strategy.set_graph_config(is_training=True)
+program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy).compile(feed_list,
fetch_list)
else:
......
@@ -16,14 +16,8 @@ import unittest
import numpy as np
import paddle
-import paddle.fluid as fluid
-import paddle.fluid.compiler as compiler
-import paddle.optimizer
import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import (IPUOpTest,
-np_dtype_to_fluid_str)
-paddle.enable_static()
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -32,85 +26,93 @@ class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
-self.set_feed()
+self.set_data_feed()
self.set_feed_attr()
-self.set_attrs()
+self.set_op_attrs()
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[2, 3]).astype('float32'),
-"y": np.random.uniform(size=[3, 2]).astype('float32'),
-}
+@property
+def fp16_enabled(self):
+return True
+def set_data_feed(self):
+x = np.random.uniform(size=[20, 30])
+y = np.random.uniform(size=[30, 20])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
def set_feed_attr(self):
-self.feed_shape = [x.shape for x in self.feed.values()]
-self.feed_list = list(self.feed.keys())
-self.feed_dtype = [
-np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
-]
+self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+self.feed_list = list(self.feed_fp32.keys())
+self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"transpose_x": False,
"transpose_y": False,
"alpha": 1.0,
}
-def _test_base(self, run_ipu=True):
-scope = fluid.core.Scope()
+def _test_base(self, exec_mode):
+scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
-SEED = self.SEED
-main_prog.random_seed = SEED
-startup_prog.random_seed = SEED
+main_prog.random_seed = self.SEED
+startup_prog.random_seed = self.SEED
-with fluid.scope_guard(scope):
+with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
-dtype=self.feed_dtype[0])
+dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
-dtype=self.feed_dtype[1])
+dtype='float32')
out = paddle.fluid.layers.matmul(x, y, **self.attrs)
fetch_list = [out.name]
-if run_ipu:
-place = paddle.IPUPlace()
-else:
+if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
+else:
+place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
-if run_ipu:
+if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
-ipu_strategy.SetGraphConfig(is_training=self.is_training)
-program = compiler.IPUCompiledProgram(
+ipu_strategy.set_graph_config(is_training=self.is_training)
+if exec_mode == ExecutionMode.IPU_POPART_FP16:
+ipu_strategy.set_precision_config(enable_fp16=True)
+program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
-result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
+feed = self.feed_fp32
+if exec_mode > ExecutionMode.IPU_FP32:
+feed = self.feed_fp16
+result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
-res0 = self._test_base(False)
-res1 = self._test_base(True)
-self.assertTrue(
-np.allclose(
-res0.flatten(), res1.flatten(), atol=self.atol))
-self.assertTrue(res0.shape == res1.shape)
+output_dict = {}
+for mode in ExecutionMode:
+if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
+break
+output_dict[mode] = self._test_base(mode).flatten()
+self.check(output_dict)
class TestCase1(TestBase):
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"transpose_x": True,
"transpose_y": True,
@@ -119,55 +121,64 @@ class TestCase1(TestBase):
class TestCase2(TestBase):
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"transpose_x": True,
"transpose_y": True,
"alpha": 3.14,
}
def set_atol(self):
self.atol = 1e-10
self.rtol = 1e-6
self.atol_fp16 = 1e-2
self.rtol_fp16 = 1e-3
class TestCase3(TestBase):
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[5, 4, 2, 3]).astype('float32'),
-"y": np.random.uniform(size=[5, 4, 3, 2]).astype('float32'),
-}
+def set_data_feed(self):
+x = np.random.uniform(size=[5, 4, 3, 2])
+y = np.random.uniform(size=[5, 4, 2, 3])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
class TestCase4(TestBase):
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[4, 2, 3]).astype('float32'),
-"y": np.random.uniform(size=[4, 3, 2]).astype('float32'),
-}
+def set_data_feed(self):
+x = np.random.uniform(size=[4, 3, 2])
+y = np.random.uniform(size=[4, 2, 3])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
class TestCase5(TestBase):
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[4, 2, 3]).astype('float32'),
-"y": np.random.uniform(size=[3, 2]).astype('float32'),
-}
+def set_data_feed(self):
+x = np.random.uniform(size=[4, 2, 3])
+y = np.random.uniform(size=[3, 2])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
class TestCase6(TestBase):
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[3]).astype('float32'),
-"y": np.random.uniform(size=[3]).astype('float32'),
-}
+def set_data_feed(self):
+x = np.random.uniform(size=[3])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": x.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": x.astype(np.float16)}
@unittest.skip("not supported")
class TestCase6_2(TestCase6):
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[3]).astype('float32'),
-"y": np.random.uniform(size=[3]).astype('float32'),
-}
+def set_data_feed(self):
+x = np.random.uniform(size=[3])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": x.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": x.astype(np.float16)}
-def set_attrs(self):
+def set_op_attrs(self):
self.attrs = {
"transpose_x": True,
"transpose_y": True,
@@ -176,27 +187,36 @@ class TestCase6_2(TestCase6):
class TestCase7(TestBase):
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[3, 1]).astype('float32'),
-"y": np.random.uniform(size=[1, 2]).astype('float32'),
-}
+def set_data_feed(self):
+x = np.random.uniform(size=[1, 12, 128, 64])
+y = np.random.uniform(size=[1, 12, 128, 64])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
+def set_op_attrs(self):
+self.attrs = {"transpose_x": False, "transpose_y": True, "alpha": 0.125}
+class TestCase8(TestBase):
+def set_data_feed(self):
+x = np.random.uniform(size=[3, 1])
+y = np.random.uniform(size=[1, 2])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
@unittest.skip("not supported")
-class TestCase7_2(TestBase):
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[3]).astype('float32'),
-"y": np.random.uniform(size=[2]).astype('float32'),
-}
-# equal to
-# self.feed = {
-#     "x": np.random.uniform(size=[3, 1]).astype('float32'),
-#     "y": np.random.uniform(size=[1, 2]).astype('float32'),
-# }
-def set_attrs(self):
+class TestCase8_2(TestBase):
+def set_data_feed(self):
+x = np.random.uniform(size=[3])
+y = np.random.uniform(size=[2])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
+def set_op_attrs(self):
self.attrs = {
"transpose_x": True,
"transpose_y": True,
@@ -205,12 +225,12 @@ class TestCase7_2(TestBase):
@unittest.skip("dim > 4 is not supported")
-class TestCase8(TestBase):
-def set_feed(self):
-self.feed = {
-"x": np.random.uniform(size=[6, 5, 4, 2, 3]).astype('float32'),
-"y": np.random.uniform(size=[6, 5, 4, 3, 2]).astype('float32'),
-}
+class TestCase9(TestBase):
+def set_data_feed(self):
+x = np.random.uniform(size=[6, 5, 4, 2, 3])
+self.feed_fp32 = {"x": x.astype(np.float32), "y": x.astype(np.float32)}
+self.feed_fp16 = {"x": x.astype(np.float16), "y": x.astype(np.float16)}
if __name__ == "__main__":
......
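The updated TestCase7 sets alpha=0.125 with transpose_y=True on [1, 12, 128, 64] inputs — an attention-style scaled product (0.125 = 1/sqrt(64) for 64-dim heads). fluid's matmul scales the product by alpha, so a numpy reference for that case looks like the sketch below (for checking the semantics; not part of the test suite):

import numpy as np

x = np.random.uniform(size=[1, 12, 128, 64]).astype(np.float32)
y = np.random.uniform(size=[1, 12, 128, 64]).astype(np.float32)
# transpose_y=True, alpha=0.125: out = 0.125 * (x @ y^T) over the last two axes
ref = 0.125 * np.matmul(x, y.transpose(0, 1, 3, 2))
assert ref.shape == (1, 12, 128, 128)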