未验证 提交 94acf7c8 编写于 作者: A Allen Guo 提交者: GitHub

update UTs 3 (#42519)

上级 832e58d6
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 2, 20]) data = np.random.uniform(size=[1, 3, 2, 20])
self.feed_fp32 = {"in_0": data.astype(np.float32)} self.feed_fp32 = {"in_0": data.astype(np.float32)}
...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest): ...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"axis": -1} self.attrs = {"axis": -1}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED out = paddle.fluid.layers.softmax(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope): def run_model(self, exec_mode):
with paddle.static.program_guard(main_prog, startup_prog): self.run_op_test(exec_mode)
x = paddle.static.data(
name=self.feed_list[0], def test(self):
shape=self.feed_shape[0], for m in IPUOpTest.ExecutionMode:
dtype='float32') if not self.skip_mode(m):
self.build_model()
out = paddle.fluid.layers.softmax(x, **self.attrs) self.run_model(m)
self.check()
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
import paddle.nn.functional as F
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """IPU unit test for F.softmax_with_cross_entropy with hard labels."""

    def setUp(self):
        # Standard IPUOpTest preparation sequence.
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        """Build fp32/fp16 feeds; labels are int64 for fp32, int32 for fp16."""
        logits = np.random.uniform(size=[3, 7])
        label = np.arange(3).reshape([3, 1])
        self.feed_fp32 = {
            "x": logits.astype(np.float32),
            "label": label.astype(np.int64),
        }
        self.feed_fp16 = {
            "x": logits.astype(np.float16),
            "label": label.astype(np.int32),
        }

    def set_feed_attr(self):
        # Derive static-graph input metadata from the fp32 feed.
        self.feed_shape = [arr.shape for arr in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {'soft_label': False, }

    @IPUOpTest.static_graph
    def build_model(self, on_ipu):
        """Construct the graph; the label dtype differs between CPU and IPU."""
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype="float32")
        # NOTE(review): IPU graphs declare int32 labels — presumably because
        # popart does not accept int64 tensors; CPU keeps the int64 default.
        label_dtype = 'int32' if on_ipu else 'int64'
        label = paddle.static.data(
            name=self.feed_list[1],
            shape=self.feed_shape[1],
            dtype=label_dtype)
        out = F.softmax_with_cross_entropy(x, label, **self.attrs)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        if self.is_ipu_mode(exec_mode):
            # Match the int32 label placeholder declared for IPU graphs.
            self.feed_fp32['label'] = self.feed_fp32['label'].astype(np.int32)
        self.run_op_test(exec_mode)

    def test(self):
        # Run every supported execution mode, then compare outputs once.
        for mode in IPUOpTest.ExecutionMode:
            if self.skip_mode(mode):
                continue
            self.build_model(self.is_ipu_mode(mode))
            self.run_model(mode)
        self.check()
class TestCase1(TestBase):
    """Same op with ignore_index=1: label value 1 is excluded from the loss."""

    def set_op_attrs(self):
        self.attrs = {'soft_label': False, 'ignore_index': 1}
class TestCase2(TestBase):
    """Same op on a larger problem: 30 samples, 70 classes."""

    def set_data_feed(self):
        logits = np.random.uniform(size=[30, 70])
        targets = np.arange(30).reshape([30, 1])
        self.feed_fp32 = {
            "x": logits.astype(np.float32),
            "label": targets.astype(np.int64),
        }
        self.feed_fp16 = {
            "x": logits.astype(np.float16),
            "label": targets.astype(np.int32),
        }
# Run the IPU op tests when executed as a script.
if __name__ == "__main__":
    unittest.main()
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,13 +30,8 @@ class TestBase(IPUOpTest): ...@@ -30,13 +30,8 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data1 = np.random.uniform(size=[1, 3, 10, 10]) data1 = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'x': data1.astype(np.float32)} self.feed_fp32 = {'x': data1.astype(np.float32)}
self.feed_fp16 = {'x': data1.astype(np.float16)} self.feed_fp16 = {'x': data1.astype(np.float16)}
...@@ -47,61 +42,24 @@ class TestBase(IPUOpTest): ...@@ -47,61 +42,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"num_or_sections": [1, 1, 1], "axis": 1} self.attrs = {"num_or_sections": [1, 1, 1], "axis": 1}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED out = paddle.split(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [fetch.name for fetch in out]
with paddle.static.scope_guard(scope): def run_model(self, exec_mode):
with paddle.static.program_guard(main_prog, startup_prog): self.run_op_test(exec_mode)
x = paddle.static.data(
name=self.feed_list[0], def test(self):
shape=self.feed_shape[0], for m in IPUOpTest.ExecutionMode:
dtype='float32') if not self.skip_mode(m):
self.build_model()
out = paddle.split(x, **self.attrs) self.run_model(m)
for k, v in self.output_dict.items():
fetch_list = [fetch.name for fetch in out] self.output_dict[k] = np.concatenate([vv.flatten() for vv in v])
self.check()
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled
) or mode == ExecutionMode.IPU_POPART_FP16:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 1, 5]) data = np.random.uniform(size=[1, 3, 1, 5])
self.feed_fp32 = {"in_0": data.astype(np.float32)} self.feed_fp32 = {"in_0": data.astype(np.float32)}
...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest): ...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"axes": [0]} self.attrs = {"axes": [0]}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED out = paddle.fluid.layers.squeeze(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope): def run_model(self, exec_mode):
with paddle.static.program_guard(main_prog, startup_prog): self.run_op_test(exec_mode)
x = paddle.static.data(
name=self.feed_list[0], def test(self):
shape=self.feed_shape[0], for m in IPUOpTest.ExecutionMode:
dtype='float32') if not self.skip_mode(m):
self.build_model()
out = paddle.fluid.layers.squeeze(x, **self.attrs) self.run_model(m)
self.check()
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.check(output_dict, check_shape=True)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
x = np.random.uniform(size=[1, 2]) x = np.random.uniform(size=[1, 2])
y = np.random.uniform(size=[1, 2]) y = np.random.uniform(size=[1, 2])
...@@ -57,67 +53,26 @@ class TestBase(IPUOpTest): ...@@ -57,67 +53,26 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"axis": 0} self.attrs = {"axis": 0}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED y = paddle.static.data(
startup_prog.random_seed = self.SEED name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
z = paddle.static.data(
with paddle.static.scope_guard(scope): name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32')
with paddle.static.program_guard(main_prog, startup_prog): out = paddle.fluid.layers.stack([x, y, z], **self.attrs)
x = paddle.static.data( self.fetch_list = [out.name]
name=self.feed_list[0],
shape=self.feed_shape[0], def run_model(self, exec_mode):
dtype='float32') self.run_op_test(exec_mode)
y = paddle.static.data(
name=self.feed_list[1], def test(self):
shape=self.feed_shape[1], for m in IPUOpTest.ExecutionMode:
dtype='float32') if not self.skip_mode(m):
z = paddle.static.data( self.build_model()
name=self.feed_list[2], self.run_model(m)
shape=self.feed_shape[2], self.check()
dtype='float32')
out = paddle.fluid.layers.stack([x, y, z], **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.check(output_dict, check_shape=True)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
x = np.random.uniform(size=[1, 3, 2, 2]) x = np.random.uniform(size=[1, 3, 2, 2])
y = np.random.uniform(size=[1, 3, 2, 2]) y = np.random.uniform(size=[1, 3, 2, 2])
...@@ -48,134 +44,52 @@ class TestBase(IPUOpTest): ...@@ -48,134 +44,52 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED y = paddle.static.data(
startup_prog.random_seed = self.SEED name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.sum([x, y], **self.attrs)
with paddle.static.scope_guard(scope): self.fetch_list = [out.name]
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = paddle.fluid.layers.sum([x, y], **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.check(output_dict, check_shape=True) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
@unittest.skip('')
class TestCase1(TestBase): class TestCase1(TestBase):
def set_feed(self): def set_data_feed(self):
x = np.random.uniform(size=[1, 3, 2, 2]) x = np.random.uniform(size=[1, 3, 2, 2])
y = np.random.uniform(size=[1, 3, 2, 2]) y = np.random.uniform(size=[1, 3, 2, 2])
z = np.random.uniform(size=[1, 3, 2, 2]) z = np.random.uniform(size=[1, 3, 2, 2])
self.feed_fp32 = { self.feed_fp32 = {
"x": x.astype(np.float32), "x": x.astype(np.float32),
"y": y.astype(np.float32), "y": y.astype(np.float32),
"z": y.astype(np.float32) "z": z.astype(np.float32)
} }
self.feed_fp16 = { self.feed_fp16 = {
"x": x.astype(np.float16), "x": x.astype(np.float16),
"y": y.astype(np.float16), "y": y.astype(np.float16),
"z": y.astype(np.float16) "z": z.astype(np.float16)
} }
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED y = paddle.static.data(
startup_prog.random_seed = self.SEED name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
z = paddle.static.data(
with paddle.static.scope_guard(scope): name=self.feed_list[2], shape=self.feed_shape[2], dtype='float32')
with paddle.static.program_guard(main_prog, startup_prog): out = paddle.fluid.layers.sum([x, y, z], **self.attrs)
x = paddle.static.data( self.fetch_list = [out.name]
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
z = paddle.static.data(
name=self.feed_list[2],
shape=self.feed_shape[2],
dtype='float32')
out = paddle.fluid.layers.sum([x, y, z], **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
iipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -31,10 +31,6 @@ class TestTopKOp(IPUOpTest): ...@@ -31,10 +31,6 @@ class TestTopKOp(IPUOpTest):
self.set_test_op() self.set_test_op()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_test_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.topk self.op = paddle.fluid.layers.topk
...@@ -53,69 +49,35 @@ class TestTopKOp(IPUOpTest): ...@@ -53,69 +49,35 @@ class TestTopKOp(IPUOpTest):
if not self.use_k_as_const_variable: if not self.use_k_as_const_variable:
self.attrs["k"] = 3 self.attrs["k"] = 3
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED if not self.use_k_as_const_variable:
startup_prog.random_seed = self.SEED topk_values, topk_indices = self.op(x, **self.attrs)
else:
with paddle.static.scope_guard(scope): # !important, popart cannot accept non const tensor
with paddle.static.program_guard(main_prog, startup_prog): K_t = paddle.fluid.layers.fill_constant(
x = paddle.static.data( shape=[1], dtype='int32', value=self.k, name="in_2")
name=self.feed_list[0], topk_values, topk_indices = self.op(x, K_t, **self.attrs)
shape=self.feed_shape[0], self.fetch_list = [topk_values.name, topk_indices.name]
dtype='float32')
def run_model(self, exec_mode):
if not self.use_k_as_const_variable: self.run_op_test(exec_mode)
topk_values, topk_indices = self.op(x, **self.attrs)
else: def test(self):
# !important, popart cannot accept non const tensor for m in IPUOpTest.ExecutionMode:
K_t = paddle.fluid.layers.fill_constant( if not self.skip_mode(m):
shape=[1], dtype='int32', value=self.k, name="in_2") self.build_model()
topk_values, topk_indices = self.op(x, K_t, **self.attrs) self.run_model(m)
fetch_list = [topk_values.name, topk_indices.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result
def test_base(self):
value_dict = {} value_dict = {}
index_dict = {} index_dict = {}
for mode in ExecutionMode: for k, v in self.output_dict.items():
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: value_dict[k] = v[0]
break index_dict[k] = v[1]
value, index = self._test_base(mode) self.check(output_dict=value_dict)
value_dict[mode] = value self.check(output_dict=index_dict)
index_dict[mode] = index
self.check(value_dict)
self.check(index_dict)
class TestCase2(TestTopKOp): class TestCase2(TestTopKOp):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10]) data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"x": data.astype(np.float32)} self.feed_fp32 = {"x": data.astype(np.float32)}
...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest): ...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"perm": [0, 2, 3, 1]} self.attrs = {"perm": [0, 2, 3, 1]}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED out = paddle.fluid.layers.transpose(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
out = paddle.fluid.layers.transpose(x, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def run_model(self, exec_mode):
output_dict = {} self.run_op_test(exec_mode)
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict, check_shape=True) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check(check_shape=True)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[1, 2, 3]) data = np.random.uniform(size=[1, 2, 3])
self.feed_fp32 = {"x": data.astype(np.float32)} self.feed_fp32 = {"x": data.astype(np.float32)}
...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest): ...@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"axes": 0} self.attrs = {"axes": 0}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
main_prog.random_seed = self.SEED out = paddle.fluid.layers.unsqueeze(x, **self.attrs)
startup_prog.random_seed = self.SEED self.fetch_list = [out.name]
with paddle.static.scope_guard(scope): def run_model(self, exec_mode):
with paddle.static.program_guard(main_prog, startup_prog): self.run_op_test(exec_mode)
x = paddle.static.data(
name=self.feed_list[0], def test(self):
shape=self.feed_shape[0], for m in IPUOpTest.ExecutionMode:
dtype='float32') if not self.skip_mode(m):
self.build_model()
out = paddle.fluid.layers.unsqueeze(x, **self.attrs) self.run_model(m)
self.check(check_shape=True)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict, check_shape=True)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -50,72 +50,57 @@ class TestWeightSharing(IPUOpTest): ...@@ -50,72 +50,57 @@ class TestWeightSharing(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, run_ipu=True): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program() x = paddle.static.data(
startup_prog = paddle.static.Program() name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64')
main_prog.random_seed = self.SEED with paddle.static.ipu_shard_guard(index=0, stage=0):
startup_prog.random_seed = self.SEED y = paddle.fluid.layers.embedding(
input=x,
with paddle.static.scope_guard(scope): size=[768, 768],
with paddle.static.program_guard(main_prog, startup_prog): dtype='float32',
x = paddle.static.data( param_attr=paddle.fluid.ParamAttr(name='word_embedding'),
name=self.feed_list[0], is_sparse=False)
shape=self.feed_shape[0], with paddle.static.ipu_shard_guard(index=1, stage=1):
dtype='int64') z = paddle.fluid.layers.fc(
input=y, size=768, param_attr=paddle.fluid.ParamAttr(name="fc"))
with paddle.static.ipu_shard_guard(index=0, stage=0): with paddle.static.ipu_shard_guard(index=0, stage=2):
y = paddle.fluid.layers.embedding( out = paddle.fluid.layers.matmul(
input=x, x=z,
size=[768, 768], y=self.main_prog.global_block().var('word_embedding'),
dtype='float32', transpose_y=True)
param_attr=paddle.fluid.ParamAttr( self.feed_list = [x.name]
name='word_embedding'), self.fetch_list = [out.name]
is_sparse=False)
def run_model(self, run_ipu):
with paddle.static.ipu_shard_guard(index=1, stage=1): self.build_model()
z = paddle.fluid.layers.fc( if run_ipu:
input=y, place = paddle.IPUPlace()
size=768, else:
param_attr=paddle.fluid.ParamAttr(name="fc")) place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
with paddle.static.ipu_shard_guard(index=0, stage=2): exe.run(self.startup_prog)
out = paddle.fluid.layers.matmul( if run_ipu:
x=z, ipu_strategy = paddle.static.IpuStrategy()
y=main_prog.global_block().var('word_embedding'), ipu_strategy.set_graph_config(
transpose_y=True) num_ipus=2,
is_training=self.is_training,
fetch_list = [out.name] enable_manual_shard=True)
ipu_strategy.set_pipelining_config(
if run_ipu: enable_pipelining=True, batches_per_step=3)
place = paddle.IPUPlace() program = paddle.static.IpuCompiledProgram(
else: self.main_prog, ipu_strategy=ipu_strategy).compile(
place = paddle.CPUPlace() self.feed_list, self.fetch_list)
exe = paddle.static.Executor(place) else:
exe.run(startup_prog) program = self.main_prog
if run_ipu: feed = self.feed_ipu if run_ipu else self.feed_cpu
feed_list = self.feed_list result = exe.run(program, feed=feed, fetch_list=self.fetch_list)
ipu_strategy = paddle.static.IpuStrategy() return result[0]
ipu_strategy.set_graph_config(
num_ipus=2,
is_training=self.is_training,
enable_manual_shard=True)
ipu_strategy.set_pipelining_config(
enable_pipelining=True, batches_per_step=3)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_ipu if run_ipu else self.feed_cpu
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self): def test_base(self):
res0 = self._test_base(False) res0 = self.run_model(False)
res1 = self._test_base(True) res1 = self.run_model(True)
self.assertTrue( self.assertTrue(
np.allclose( np.allclose(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册