未验证 提交 94acf7c8 编写于 作者: A Allen Guo 提交者: GitHub

update UTs 3 (#42519)

上级 832e58d6
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 2, 20])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
......@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"axis": -1}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.softmax(x, **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
self.check(output_dict)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
import paddle.nn.functional as F
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """IPU op test for ``F.softmax_with_cross_entropy`` with hard labels.

    Runs the op once per supported execution mode (CPU/IPU) and compares
    the collected outputs via the base class's ``check``.
    """

    def setUp(self):
        # Standard IPUOpTest setup sequence: tolerances, train/eval flag,
        # input data, feed metadata, then op attributes.
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        # 3 samples with 7 class logits each; labels are indices 0..2,
        # shaped [3, 1].
        x = np.random.uniform(size=[3, 7])
        label = np.arange(3).reshape([3, 1])
        # The fp32 feed carries int64 labels (CPU path) while the fp16 feed
        # carries int32 labels — presumably the IPU path requires int32;
        # build_model/run_model below switch label dtype accordingly.
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "label": label.astype(np.int64)
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "label": label.astype(np.int32)
        }

    def set_feed_attr(self):
        # Derive static-graph feed metadata from the fp32 feed dict.
        # Relies on dict insertion order: index 0 is "x", index 1 is "label".
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        # Hard (index) labels rather than soft probability labels.
        self.attrs = {'soft_label': False, }

    @IPUOpTest.static_graph
    def build_model(self, on_ipu):
        # Build the static graph: data -> softmax_with_cross_entropy -> loss.
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32")
        # Label placeholder dtype differs per device: int32 on IPU,
        # int64 on CPU.
        if on_ipu:
            label = paddle.static.data(
                name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
        else:
            label = paddle.static.data(
                name=self.feed_list[1], shape=self.feed_shape[1], dtype='int64')
        out = F.softmax_with_cross_entropy(x, label, **self.attrs)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        # IPU modes feed the fp32 dict but need int32 labels, so convert
        # the label array in place before running.
        if self.is_ipu_mode(exec_mode):
            self.feed_fp32['label'] = self.feed_fp32['label'].astype(np.int32)
        self.run_op_test(exec_mode)

    def test(self):
        # Build and run once per non-skipped execution mode, then compare
        # all collected outputs against each other.
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model(self.is_ipu_mode(m))
                self.run_model(m)
        self.check()
class TestCase1(TestBase):
    """Variant of TestBase that additionally sets an ignore index."""

    def set_op_attrs(self):
        # Hard labels, and targets equal to 1 are excluded from the loss.
        self.attrs = {'soft_label': False, 'ignore_index': 1}
class TestCase2(TestBase):
    """Variant of TestBase with a larger input: 30 samples, 70 logits each."""

    def set_data_feed(self):
        logits = np.random.uniform(size=[30, 70])
        targets = np.arange(30).reshape([30, 1])
        # fp32 feed keeps int64 labels (CPU path); fp16 feed uses int32
        # labels (IPU path), mirroring TestBase.
        self.feed_fp32 = {
            "x": logits.astype(np.float32),
            "label": targets.astype(np.int64),
        }
        self.feed_fp16 = {
            "x": logits.astype(np.float16),
            "label": targets.astype(np.int32),
        }
# Run all IPU softmax_with_cross_entropy tests when executed directly.
if __name__ == "__main__":
    unittest.main()
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,13 +30,8 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data1 = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'x': data1.astype(np.float32)}
self.feed_fp16 = {'x': data1.astype(np.float16)}
......@@ -47,61 +42,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"num_or_sections": [1, 1, 1], "axis": 1}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.split(x, **self.attrs)
fetch_list = [fetch.name for fetch in out]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled
) or mode == ExecutionMode.IPU_POPART_FP16:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
self.fetch_list = [fetch.name for fetch in out]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
for k, v in self.output_dict.items():
self.output_dict[k] = np.concatenate([vv.flatten() for vv in v])
self.check()
class TestCase1(TestBase):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 1, 5])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
......@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"axes": [0]}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.squeeze(x, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.check(output_dict, check_shape=True)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.random.uniform(size=[1, 2])
y = np.random.uniform(size=[1, 2])
......@@ -57,67 +53,26 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"axis": 0}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
z = paddle.static.data(
name=self.feed_list[2],
shape=self.feed_shape[2],
dtype='float32')
name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32')
out = paddle.fluid.layers.stack([x, y, z], **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
self.check(output_dict, check_shape=True)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.random.uniform(size=[1, 3, 2, 2])
y = np.random.uniform(size=[1, 3, 2, 2])
......@@ -48,134 +44,52 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.sum([x, y], **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
self.check(output_dict, check_shape=True)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
@unittest.skip('')
class TestCase1(TestBase):
def set_feed(self):
def set_data_feed(self):
x = np.random.uniform(size=[1, 3, 2, 2])
y = np.random.uniform(size=[1, 3, 2, 2])
z = np.random.uniform(size=[1, 3, 2, 2])
self.feed_fp32 = {
"x": x.astype(np.float32),
"y": y.astype(np.float32),
"z": y.astype(np.float32)
"z": z.astype(np.float32)
}
self.feed_fp16 = {
"x": x.astype(np.float16),
"y": y.astype(np.float16),
"z": y.astype(np.float16)
"z": z.astype(np.float16)
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
z = paddle.static.data(
name=self.feed_list[2],
shape=self.feed_shape[2],
dtype='float32')
name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32')
out = paddle.fluid.layers.sum([x, y, z], **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
iipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
self.fetch_list = [out.name]
if __name__ == "__main__":
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -31,10 +31,6 @@ class TestTopKOp(IPUOpTest):
self.set_test_op()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_test_op(self):
self.op = paddle.fluid.layers.topk
......@@ -53,20 +49,10 @@ class TestTopKOp(IPUOpTest):
if not self.use_k_as_const_variable:
self.attrs["k"] = 3
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
if not self.use_k_as_const_variable:
topk_values, topk_indices = self.op(x, **self.attrs)
else:
......@@ -74,48 +60,24 @@ class TestTopKOp(IPUOpTest):
K_t = paddle.fluid.layers.fill_constant(
shape=[1], dtype='int32', value=self.k, name="in_2")
topk_values, topk_indices = self.op(x, K_t, **self.attrs)
self.fetch_list = [topk_values.name, topk_indices.name]
fetch_list = [topk_values.name, topk_indices.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
def test_base(self):
value_dict = {}
index_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
value, index = self._test_base(mode)
value_dict[mode] = value
index_dict[mode] = index
self.check(value_dict)
self.check(index_dict)
for k, v in self.output_dict.items():
value_dict[k] = v[0]
index_dict[k] = v[1]
self.check(output_dict=value_dict)
self.check(output_dict=index_dict)
class TestCase2(TestTopKOp):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"x": data.astype(np.float32)}
......@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"perm": [0, 2, 3, 1]}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.transpose(x, **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict, check_shape=True)
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check(check_shape=True)
class TestCase1(TestBase):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 2, 3])
self.feed_fp32 = {"x": data.astype(np.float32)}
......@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"axes": 0}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.unsqueeze(x, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict, check_shape=True)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check(check_shape=True)
class TestCase1(TestBase):
......
......@@ -50,52 +50,37 @@ class TestWeightSharing(IPUOpTest):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, run_ipu=True):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='int64')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64')
with paddle.static.ipu_shard_guard(index=0, stage=0):
y = paddle.fluid.layers.embedding(
input=x,
size=[768, 768],
dtype='float32',
param_attr=paddle.fluid.ParamAttr(
name='word_embedding'),
param_attr=paddle.fluid.ParamAttr(name='word_embedding'),
is_sparse=False)
with paddle.static.ipu_shard_guard(index=1, stage=1):
z = paddle.fluid.layers.fc(
input=y,
size=768,
param_attr=paddle.fluid.ParamAttr(name="fc"))
input=y, size=768, param_attr=paddle.fluid.ParamAttr(name="fc"))
with paddle.static.ipu_shard_guard(index=0, stage=2):
out = paddle.fluid.layers.matmul(
x=z,
y=main_prog.global_block().var('word_embedding'),
y=self.main_prog.global_block().var('word_embedding'),
transpose_y=True)
self.feed_list = [x.name]
self.fetch_list = [out.name]
fetch_list = [out.name]
def run_model(self, run_ipu):
self.build_model()
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
exe.run(self.startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(
num_ipus=2,
......@@ -104,18 +89,18 @@ class TestWeightSharing(IPUOpTest):
ipu_strategy.set_pipelining_config(
enable_pipelining=True, batches_per_step=3)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
self.main_prog, ipu_strategy=ipu_strategy).compile(
self.feed_list, self.fetch_list)
else:
program = main_prog
program = self.main_prog
feed = self.feed_ipu if run_ipu else self.feed_cpu
result = exe.run(program, feed=feed, fetch_list=fetch_list)
result = exe.run(program, feed=feed, fetch_list=self.fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
res0 = self.run_model(False)
res1 = self.run_model(True)
self.assertTrue(
np.allclose(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册