Unverified · Commit 001dab0b, authored by Allen Guo, committed by GitHub

update UTs 2 (#42518)

Parent: 063a3509
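The diffs below apply one mechanical refactor across the IPU unit tests: the hand-rolled _test_base(exec_mode) harness (program/scope setup, place selection, IpuStrategy wiring, fp32/fp16 feed switching) is deleted, and each test is restated as a build_model / run_model / test triple on top of IPUOpTest helpers (IPUOpTest.static_graph, run_op_test, skip_mode, check, IPUOpTest.ExecutionMode). For orientation, here is a minimal sketch of the target pattern, assembled only from pieces visible in this commit; the class name and the op under test are illustrative placeholders, and the helper semantics are assumed from how they are used in these diffs rather than from the helpers' own source.

import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestExample(IPUOpTest):  # hypothetical op test, for illustration only
    def setUp(self):
        self.set_atol()
        self.set_training()
        data = np.random.uniform(size=[1, 3, 10, 10])
        # fp32 and fp16 variants of the same feed; run_op_test picks one
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    @IPUOpTest.static_graph  # assumed to set up self.main_prog/self.startup_prog
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
        out = paddle.fluid.layers.mean(x)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        # run_op_test replaces the old _test_base boilerplate: it selects
        # CPU vs IPU, compiles via IpuCompiledProgram when needed, and
        # stores the result keyed by exec_mode for check() to compare.
        self.run_op_test(exec_mode)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()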
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 10, 10])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
@@ -46,59 +42,22 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {}
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.mean(x)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.mean(x)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 if __name__ == "__main__":
...
@@ -18,7 +18,7 @@ import numpy as np
 import paddle
 import paddle.static
 import paddle.nn.functional as F
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionModeFull
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -28,10 +28,7 @@ class TestBase(IPUOpTest):
         self.set_atol()
         self.set_data_feed()
         self.set_feed_attr()
-
-    @property
-    def fp16_enabled(self):
-        return True
+        self.set_attrs()
 
     def set_atol(self):
         self.atol = 1e-6
@@ -42,7 +39,6 @@ class TestBase(IPUOpTest):
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 10, 27, 27])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
-        self.feed_fp16 = {"in_0": data.astype(np.float16)}
 
     def set_feed_attr(self):
         self.feed_shape = [x.shape for x in self.feed_fp32.values()]
@@ -54,86 +50,126 @@ class TestBase(IPUOpTest):
         for var_name in to_fp16_var_names:
             assert (block.var(var_name).dtype, paddle.float16)
 
-    def _test_base(self, exec_mode):
-        generator = paddle.fluid.unique_name.UniqueNameGenerator()
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.fluid.unique_name.guard(generator):
-            with paddle.static.scope_guard(scope):
-                with paddle.static.program_guard(main_prog, startup_prog):
-                    x = paddle.static.data(
-                        name=self.feed_list[0],
-                        shape=self.feed_shape[0],
-                        dtype='float32')
-                    # using fp32
-                    x = paddle.static.nn.conv2d(
-                        input=x, num_filters=3, filter_size=3)
-                    x = paddle.static.nn.batch_norm(x, act='relu')
-                    x = F.max_pool2d(x, kernel_size=2, stride=2)
-                    # using fp16
-                    with paddle.static.amp.fp16_guard():
-                        x = paddle.static.nn.conv2d(
-                            input=x, num_filters=6, filter_size=3)
-                        x = paddle.static.nn.batch_norm(x, act='relu')
-                        x = F.max_pool2d(x, kernel_size=2, stride=2)
-
-                    # using fp32
-                    x = paddle.static.nn.fc(x, size=10)
-                    loss = paddle.mean(x)
-                    fetch_list = [loss.name]
-
-            if exec_mode == ExecutionModeFull.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            # cast model to fp16
-            if exec_mode == ExecutionModeFull.IPU_MIXED_PRECISION:
-                to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
-                    main_prog, self.amp_list)
-                self.dtype_check(main_prog, to_fp16_var_names)
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            # cast parameters to fp16
-            if exec_mode == ExecutionModeFull.IPU_MIXED_PRECISION:
-                paddle.static.amp.cast_parameters_to_fp16(
-                    paddle.CPUPlace(),
-                    main_prog,
-                    to_fp16_var_names=to_fp16_var_names)
-
-            if exec_mode != ExecutionModeFull.CPU_FP32:
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=False)
-                if exec_mode == ExecutionModeFull.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog, ipu_strategy=ipu_strategy).compile(
-                        self.feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test(self):
-        output_dict = {}
-        for mode in ExecutionModeFull:
-            if mode == ExecutionModeFull.IPU_POPART_FP16:
-                continue
-            if mode > ExecutionModeFull.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    def set_attrs(self):
+        self.num_ipus = 1
+        self.enable_pipelining = False
+        self.enable_manual_shard = False
+        self.batches_per_step = 1
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        # using fp32
+        x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
+        x = paddle.static.nn.batch_norm(x, act='relu')
+        x = F.max_pool2d(x, kernel_size=2, stride=2)
+        # using fp16
+        with paddle.static.amp.fp16_guard():
+            x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
+            x = paddle.static.nn.batch_norm(x, act='relu')
+            x = F.max_pool2d(x, kernel_size=2, stride=2)
+        # using fp32
+        x = paddle.static.nn.fc(x, size=10)
+        loss = paddle.mean(x)
+        self.fetch_list = [loss.name]
+
+    def run_model(self, exec_mode):
+        # cast model to fp16
+        if self.is_fp16_mode(exec_mode):
+            amp_list = paddle.static.amp.CustomOpLists()
+            amp_list.unsupported_list = {}
+            to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
+                self.main_prog, amp_list, use_fp16_guard=True)
+            self.dtype_check(self.main_prog, to_fp16_var_names)
+
+        if self.is_ipu_mode(exec_mode):
+            place = paddle.CPUPlace()
+        else:
+            place = paddle.IPUPlace()
+        exe = paddle.static.Executor(place)
+        exe.run(self.startup_prog)
+
+        # cast parameters to fp16
+        if exec_mode == IPUOpTest.ExecutionMode.IPU_FP16:
+            paddle.static.amp.cast_parameters_to_fp16(
+                paddle.CPUPlace(),
+                self.main_prog,
+                to_fp16_var_names=to_fp16_var_names)
+
+        if self.is_ipu_mode(exec_mode):
+            ipu_strategy = paddle.static.IpuStrategy()
+            ipu_strategy.set_graph_config(
+                is_training=False,
+                num_ipus=self.num_ipus,
+                enable_manual_shard=self.enable_manual_shard)
+            ipu_strategy.set_pipelining_config(
+                enable_pipelining=self.enable_pipelining,
+                batches_per_step=self.batches_per_step)
+            program = paddle.static.IpuCompiledProgram(
+                self.main_prog, ipu_strategy=ipu_strategy).compile(
+                    self.feed_list, self.fetch_list)
+        else:
+            program = self.main_prog
+
+        result = exe.run(program,
+                         feed=self.feed_fp32,
+                         fetch_list=self.fetch_list)
+        self.output_dict[exec_mode] = result[0]
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            self.build_model()
+            self.run_model(m)
+        self.check()
+
+
+class TestPipline(TestBase):
+    @IPUOpTest.static_graph
+    def build_model(self, exec_mode):
+        feed_shape = list(self.feed_shape[0])
+        if self.is_ipu_mode(exec_mode):
+            feed_shape[0] = 1
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=feed_shape, dtype='float32')
+        with paddle.static.ipu_shard_guard(index=0, stage=0):
+            # using fp32
+            x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
+            x = paddle.static.nn.batch_norm(x, act='relu')
+            x = F.max_pool2d(x, kernel_size=2, stride=2)
+        with paddle.static.ipu_shard_guard(index=1, stage=1):
+            # using fp16
+            with paddle.static.amp.fp16_guard():
+                x = paddle.static.nn.conv2d(
+                    input=x, num_filters=6, filter_size=3)
+                x = paddle.static.nn.batch_norm(x, act='relu')
+                x = F.max_pool2d(x, kernel_size=2, stride=2)
+        with paddle.static.ipu_shard_guard(index=2, stage=2):
+            # using fp32
+            x = paddle.static.nn.fc(x, size=10)
+            loss = paddle.mean(x)
+        self.fetch_list = [loss.name]
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[3, 10, 27, 27])
+        self.feed_fp32 = {"in_0": data.astype(np.float32)}
+
+    def set_attrs(self):
+        self.num_ipus = 3
+        self.enable_pipelining = True
+        self.enable_manual_shard = True
+        self.batches_per_step = 3
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            self.build_model(m)
+            self.run_model(m)
+        # skip check results
 
 
 if __name__ == "__main__":
...
@@ -18,7 +18,7 @@ import numpy as np
 import paddle
 import paddle.static
 import paddle.nn.functional as F
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionModeFull
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -29,10 +29,7 @@ class TestBase(IPUOpTest):
         self.set_training()
         self.set_data_feed()
         self.set_feed_attr()
-
-    @property
-    def fp16_enabled(self):
-        return True
+        self.set_attrs()
 
     def set_atol(self):
         self.atol = 2e-6
@@ -47,104 +44,149 @@ class TestBase(IPUOpTest):
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 28, 28])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
-        self.feed_fp16 = {"in_0": data.astype(np.float16)}
 
     def set_feed_attr(self):
         self.feed_shape = [x.shape for x in self.feed_fp32.values()]
         self.feed_list = list(self.feed_fp32.keys())
 
+    def set_attrs(self):
+        self.num_ipus = 1
+        self.enable_pipelining = False
+        self.enable_manual_shard = False
+        self.batches_per_step = 1
+
     def dtype_check(self, program, to_fp16_var_names):
         block = program.global_block()
         assert len(to_fp16_var_names) > 0
         for var_name in to_fp16_var_names:
             assert (block.var(var_name).dtype, paddle.float16)
 
-    def _test_base(self, exec_mode):
-        generator = paddle.fluid.unique_name.UniqueNameGenerator()
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.fluid.unique_name.guard(generator):
-            with paddle.static.scope_guard(scope):
-                with paddle.static.program_guard(main_prog, startup_prog):
-                    x = paddle.static.data(
-                        name=self.feed_list[0],
-                        shape=self.feed_shape[0],
-                        dtype='float32')
-                    # using fp32
-                    x = paddle.static.nn.conv2d(
-                        input=x, num_filters=3, filter_size=3)
-                    x = paddle.static.nn.batch_norm(x, act='relu')
-                    x = F.max_pool2d(x, kernel_size=2, stride=2)
-
-                    # using fp16
-                    with paddle.static.amp.fp16_guard():
-                        x = paddle.static.nn.conv2d(
-                            input=x, num_filters=6, filter_size=3)
-                        x = paddle.static.nn.batch_norm(x, act='relu')
-                        x = F.max_pool2d(x, kernel_size=2, stride=2)
-
-                    # using fp32
-                    x = paddle.static.nn.fc(x, size=10)
-                    loss = paddle.mean(x)
-
-                    # optimizer
-                    optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
-                    optimizer.minimize(loss, startup_prog)
-                    fetch_list = [loss.name]
-
-            # cast model to fp16
-            if exec_mode == ExecutionModeFull.IPU_MIXED_PRECISION:
-                to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
-                    main_prog, self.amp_list)
-                self.dtype_check(main_prog, to_fp16_var_names)
-
-            if exec_mode == ExecutionModeFull.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            # cast parameters to fp16
-            if exec_mode == ExecutionModeFull.IPU_MIXED_PRECISION:
-                paddle.static.amp.cast_parameters_to_fp16(
-                    paddle.CPUPlace(),
-                    main_prog,
-                    to_fp16_var_names=to_fp16_var_names)
-
-            if exec_mode != ExecutionModeFull.CPU_FP32:
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionModeFull.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog, ipu_strategy=ipu_strategy).compile(
-                        self.feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            result = []
-            for i in range(self.epoch):
-                out = exe.run(program, feed=feed, fetch_list=fetch_list)
-                result.append(out)
-            return np.array(result)
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionModeFull:
-            if mode == ExecutionModeFull.IPU_POPART_FP16:
-                continue
-            if mode > ExecutionModeFull.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        # using fp32
+        x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
+        x = paddle.static.nn.batch_norm(x, act='relu')
+        x = F.max_pool2d(x, kernel_size=2, stride=2)
+        # using fp16
+        with paddle.static.amp.fp16_guard():
+            x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
+            x = paddle.static.nn.batch_norm(x, act='relu')
+            x = F.max_pool2d(x, kernel_size=2, stride=2)
+        # using fp32
+        x = paddle.static.nn.fc(x, size=10)
+        loss = paddle.mean(x)
+        # optimizer
+        optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
+        optimizer.minimize(loss, self.startup_prog)
+        self.fetch_list = [loss.name]
+
+    def run_model(self, exec_mode):
+        # cast model to fp16
+        if self.is_fp16_mode(exec_mode):
+            amp_list = paddle.static.amp.CustomOpLists()
+            amp_list.unsupported_list = {}
+            to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
+                self.main_prog, amp_list)
+            self.dtype_check(self.main_prog, to_fp16_var_names)
+
+        if self.is_ipu_mode(exec_mode):
+            place = paddle.CPUPlace()
+        else:
+            place = paddle.IPUPlace()
+        exe = paddle.static.Executor(place)
+        exe.run(self.startup_prog)
+
+        # cast parameters to fp16
+        if self.is_fp16_mode(exec_mode):
+            paddle.static.amp.cast_parameters_to_fp16(
+                paddle.CPUPlace(),
+                self.main_prog,
+                to_fp16_var_names=to_fp16_var_names)
+
+        if self.is_ipu_mode(exec_mode):
+            ipu_strategy = paddle.static.IpuStrategy()
+            ipu_strategy.set_graph_config(
+                is_training=self.is_training,
+                num_ipus=self.num_ipus,
+                enable_manual_shard=self.enable_manual_shard)
+            ipu_strategy.set_pipelining_config(
+                enable_pipelining=self.enable_pipelining,
+                batches_per_step=self.batches_per_step)
+            program = paddle.static.IpuCompiledProgram(
+                self.main_prog, ipu_strategy=ipu_strategy).compile(
+                    self.feed_list, self.fetch_list)
+        else:
+            program = self.main_prog
+
+        result = []
+        for _ in range(self.epoch):
+            out = exe.run(program,
+                          feed=self.feed_fp32,
+                          fetch_list=self.fetch_list)
+            result.append(out)
+        self.output_dict[exec_mode] = result
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            self.build_model()
+            self.run_model(m)
+        self.check()
+
+
+class TestPipline(TestBase):
+    @IPUOpTest.static_graph
+    def build_model(self, exec_mode):
+        feed_shape = list(self.feed_shape[0])
+        if self.is_ipu_mode(exec_mode):
+            feed_shape[0] = 1
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=feed_shape, dtype='float32')
+        with paddle.static.ipu_shard_guard(index=0, stage=0):
+            # using fp32
+            x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
+            x = paddle.static.nn.batch_norm(x, act='relu')
+            x = F.max_pool2d(x, kernel_size=2, stride=2)
+        with paddle.static.ipu_shard_guard(index=1, stage=1):
+            # using fp16
+            with paddle.static.amp.fp16_guard():
+                x = paddle.static.nn.conv2d(
+                    input=x, num_filters=6, filter_size=3)
+                x = paddle.static.nn.batch_norm(x, act='relu')
+                x = F.max_pool2d(x, kernel_size=2, stride=2)
+        with paddle.static.ipu_shard_guard(index=2, stage=2):
+            # using fp32
+            x = paddle.static.nn.fc(x, size=10)
+            loss = paddle.mean(x)
+        # optimizer
+        optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
+        optimizer.minimize(loss, self.startup_prog)
+        self.fetch_list = [loss.name]
+
+    def set_data_feed(self):
+        data = np.random.uniform(size=[5, 10, 27, 27])
+        self.feed_fp32 = {"in_0": data.astype(np.float32)}
+
+    def set_attrs(self):
+        self.num_ipus = 3
+        self.enable_pipelining = True
+        self.enable_manual_shard = True
+        self.batches_per_step = 5
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            self.build_model(m)
+            self.run_model(m)
+        # skip check results
 
 
 if __name__ == "__main__":
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         x = np.random.uniform(size=[2, 5])
         y = np.random.uniform(size=[5, 3])
@@ -51,63 +47,24 @@ class TestBase(IPUOpTest):
             "y_num_col_dims": 1,
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                y = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.mul(x, y, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        y = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
+        out = paddle.fluid.layers.mul(x, y, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 class TestCase1(TestBase):
...
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        x = np.ones([1, 10])
        y = np.zeros([1, 10])
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "y": y.astype(np.float32),
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "y": y.astype(np.float16),
        }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {}

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
        y = paddle.static.data(
            name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
        out = paddle.fluid.layers.not_equal(x, y, **self.attrs)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        self.run_op_test(exec_mode)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()


class TestCase1(TestBase):
    def set_data_feed(self):
        x = np.ones([1, 10])
        y = np.ones([1, 10])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}


class TestCase2(TestBase):
    def set_data_feed(self):
        x = np.ones([1, 10])
        y = np.arange(0, 10).reshape([1, 10])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestScalar(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        x = np.ones([1, 10])
        y = 0.5
        self.feed_fp32 = {"x": x.astype(np.float32), }
        self.feed_fp16 = {"x": x.astype(np.float16), }

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {}

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
        out = (x != 0.5)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        self.run_op_test(exec_mode)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()


if __name__ == "__main__":
    unittest.main()
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,74 +30,34 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data1 = np.array([[1], [1], [3], [0]])
-        self.feed = {'x': data1.astype(np.int32)}
+        self.feed_fp32 = {'x': data1.astype(np.int32)}
+        self.feed_fp16 = {'x': data1.astype(np.int32)}
 
     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
 
     def set_op_attrs(self):
         self.attrs = {"depth": 4, "allow_out_of_range": False}
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='int32')
-
-                out = paddle.fluid.layers.one_hot(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled):
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32')
+        out = paddle.fluid.layers.one_hot(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
        self.check()
 
 
 @unittest.skip('does not support allow_out_of_range=True')
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,74 +30,34 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data1 = np.array([[1], [1], [3], [0]])
-        self.feed = {'x': data1.astype(np.int32)}
+        self.feed_fp32 = {'x': data1.astype(np.int32)}
+        self.feed_fp16 = {'x': data1.astype(np.int32)}
 
     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
 
     def set_op_attrs(self):
         self.attrs = {"depth": 4, "allow_out_of_range": False}
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='int32')
-
-                out = paddle.fluid.input.one_hot(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled):
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32')
+        out = paddle.fluid.input.one_hot(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
        self.check()
 
 
 @unittest.skip('does not support allow_out_of_range=True')
...
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import print_function
-
 import numpy as np
 import unittest
 import paddle
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 10, 10])
         self.feed_fp32 = {'in_0': data.astype(np.float32)}
@@ -56,59 +52,22 @@ class TestBase(IPUOpTest):
             "data_format": 'NCHW',
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.pool2d(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.pool2d(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 class TestCase1(TestBase):
@@ -180,5 +139,21 @@ class TestCase6(TestBase):
         self.attrs['exclusive'] = False
 
 
+class TestAdaptive(TestBase):
+    def set_op_attrs(self):
+        self.attrs = {
+            "pool_size": 1,
+            "pool_type": 'avg',
+            "require_index": False
+        }
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+
 if __name__ == "__main__":
     unittest.main()
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 10, 10])
         self.feed_fp32 = {'in_0': data.astype(np.float32)}
@@ -56,59 +52,22 @@ class TestBase(IPUOpTest):
             "data_format": 'NCHW',
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.pool2d(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.pool2d(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 class TestCase1(TestBase):
@@ -179,5 +138,21 @@ class TestCase6(TestBase):
         self.attrs['exclusive'] = False
 
 
+class TestAdaptive(TestBase):
+    def set_op_attrs(self):
+        self.attrs = {
+            "pool_size": 1,
+            "pool_type": 'max',
+            "require_index": False
+        }
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+
 if __name__ == "__main__":
     unittest.main()
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 2, 2])
         self.feed_fp32 = {"x": data.astype(np.float32)}
@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {"factor": 2.0}
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.pow(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.pow(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 class TestCase1(TestBase):
@@ -119,54 +78,14 @@ class TestCase1(TestBase):
     def set_op_attrs(self):
         self.attrs = {}
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                factor = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        factor = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
+        out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs)
+        self.fetch_list = [out.name]
 
 
 if __name__ == "__main__":
...
@@ -30,82 +30,48 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return False
-
     def set_data_feed(self):
-        self.feed = {
-            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
-        }
+        data = np.random.uniform(size=[1, 3, 3, 3]).astype('float32')
+        self.feed_fp32 = {"x": data.astype(np.float32)}
+        self.feed_fp16 = {"x": data.astype(np.float16)}
 
     def set_feed_attr(self):
-        self.feed_shape = [x.shape for x in self.feed.values()]
-        self.feed_list = list(self.feed.keys())
-        self.feed_dtype = [x.dtype for x in self.feed.values()]
+        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
+        self.feed_list = list(self.feed_fp32.keys())
+        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
 
     def set_op_attrs(self):
         self.attrs = {}
 
-    def _test_base(self, run_ipu=True):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype=self.feed_dtype[0])
-                out = paddle.fluid.layers.conv2d(
-                    x, num_filters=3, filter_size=3)
-                out = paddle.fluid.layers.Print(out, **self.attrs)
-
-                if self.is_training:
-                    loss = paddle.mean(out)
-                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
-                    adam.minimize(loss)
-                    fetch_list = [loss.name]
-                else:
-                    fetch_list = [out.name]
-
-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
-                place = paddle.CPUPlace()
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if run_ipu:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            if self.is_training:
-                result = []
-                for _ in range(self.epoch):
-                    loss_res = exe.run(program,
-                                       feed=self.feed,
-                                       fetch_list=fetch_list)
-                    result.append(loss_res[0])
-                return np.array(result)
-            else:
-                result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
-                return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0],
+            shape=self.feed_shape[0],
+            dtype=self.feed_dtype[0])
+        out = paddle.fluid.layers.conv2d(x, num_filters=3, filter_size=3)
+        out = paddle.fluid.layers.Print(out, **self.attrs)
+
+        if self.is_training:
+            loss = paddle.mean(out)
+            adam = paddle.optimizer.Adam(learning_rate=1e-2)
+            adam.minimize(loss)
+            self.fetch_list = [loss.name]
+        else:
+            self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
 
     def test(self):
-        res0 = self._test_base(False)
-        res1 = self._test_base(True)
-
-        self.assertTrue(
-            np.allclose(
-                res0.flatten(), res1.flatten(), atol=self.atol))
-        self.assertTrue(res0.shape == res1.shape)
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
 
 
 class TestCase1(TestBase):
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -28,10 +28,6 @@ class TestMean(IPUOpTest):
         self.set_training()
         self.set_test_op()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_test_op(self):
         self.op = paddle.fluid.layers.reduce_mean
@@ -40,59 +36,22 @@ class TestMean(IPUOpTest):
         self.feed_list = list(self.feed_fp32.keys())
         self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = self.op(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def run_test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = self.op(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def run_test_base(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
     def set_data_feed0(self):
         data = np.random.uniform(size=[2, 4])
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 10, 10])
         self.feed_fp32 = {"x": data.astype(np.float32)}
@@ -50,60 +46,23 @@ class TestBase(IPUOpTest):
             "inplace": True,
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                add = paddle.fluid.layers.elementwise_add(x, x)
-                out = paddle.fluid.layers.reshape(add, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        add = paddle.fluid.layers.elementwise_add(x, x)
+        out = paddle.fluid.layers.reshape(add, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 class TestCase1(TestBase):
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()
 
-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[2, 4, 6])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
@@ -48,59 +44,22 @@ class TestBase(IPUOpTest):
         self.attrs['shape'] = [6, 8]
         self.attrs['inplace'] = False
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.reshape(x=x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.reshape(x=x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
        self.check()
 
 
 class TestCase1(TestBase):
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
 
 
 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -51,59 +51,22 @@ class TestBase(IPUOpTest):
             "bias_after_scale": True,
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.scale(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.scale(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()
 
 
 class TestCase1(TestBase):
@@ -155,54 +118,14 @@ class TestCase5(TestBase):
             "bias_after_scale": True,
         }
 
-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                y = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.scale(x, scale=y, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        y = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
+        out = paddle.fluid.layers.scale(x, scale=y, **self.attrs)
+        self.fetch_list = [out.name]
 
 
 if __name__ == "__main__":
...
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import unittest
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_attrs()

    def set_training(self):
        self.is_training = True
        self.epoch = 100

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 10, 10]).astype('float32')
        self.feed_fp32 = {"image": data.astype(np.float32)}
        self.feed_fp16 = {"image": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())
        self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]

    def set_attrs(self):
        self.attrs = {
            "optimizer": 'lamb',
            "weight_decay": 0.0,
            "scaled_optimizer_state": True
        }

    @IPUOpTest.static_graph
    def build_model(self):
        image = paddle.static.data(
            name='image', shape=[1, 3, 10, 10], dtype='float32')
        conv1 = paddle.static.nn.conv2d(
            image, num_filters=3, filter_size=3, bias_attr=False)
        loss = paddle.mean(conv1)

        weight_decay = self.attrs['weight_decay']
        opt = paddle.optimizer.Adam(
            learning_rate=1e-1, weight_decay=weight_decay)
        if self.attrs['optimizer'] == 'lamb':
            opt = paddle.optimizer.Lamb(
                learning_rate=1e-1, lamb_weight_decay=weight_decay)
        opt.minimize(loss)
        self.feed_list = [image.name]
        self.fetch_list = [loss.name]

    def run_model(self, exec_mode):
        ipu_strategy = paddle.static.IpuStrategy()
        ipu_strategy.set_graph_config(is_training=self.is_training)
        if self.is_ipu_mode(exec_mode):
            if "use_no_bias_optimizer" in self.attrs.keys():
                ipu_strategy.set_options({
                    "use_no_bias_optimizer": self.attrs["use_no_bias_optimizer"]
                })
            if "scaled_optimizer_state" in self.attrs.keys():
                ipu_strategy.set_options({
                    "scaled_optimizer_state":
                    self.attrs["scaled_optimizer_state"]
                })
        self.run_op_test(exec_mode, ipu_strategy=ipu_strategy)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()


class TestScaledAdam(TestBase):
    def set_attrs(self):
        self.attrs = {
            "optimizer": 'adam',
            "weight_decay": 0.0,
            "scaled_optimizer_state": True
        }

    def set_atol(self):
        super().set_atol()
        self.atol = 1e-5
        self.rtol = 1e-5


@unittest.skip('cpu do not support AdamNoBias')
class TestScaledAdamNoBias(TestBase):
    def set_attrs(self):
        self.attrs = {
            "optimizer": 'adam',
            "weight_decay": 0.0,
            "use_no_bias_optimizer": True,
            "scaled_optimizer_state": True
        }


@unittest.skip('cpu do not support LambNoBias')
class TestScaledLambNoBias(TestBase):
    def set_attrs(self):
        self.attrs = {
            "optimizer": 'lamb',
            "weight_decay": 0.0,
            "use_no_bias_optimizer": True,
            "scaled_optimizer_state": True
        }


if __name__ == "__main__":
    unittest.main()
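Note: `run_op_test` above hides the executor and compilation boilerplate that the deleted `_test_base` methods in this commit spelled out by hand. As a rough standalone sketch of that underlying flow, reconstructed from the removed code (the op, shapes, and feed names here are placeholders, and an IPU-enabled build of Paddle is assumed):

import numpy as np
import paddle
import paddle.static

# Sketch of the flow run_op_test() wraps, based on the removed _test_base
# bodies in this commit. Placeholder op and shapes; assumes an IPU build.
paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='in_0', shape=[1, 3, 10, 10], dtype='float32')
    out = paddle.mean(x)
fetch_list = [out.name]

exe = paddle.static.Executor(paddle.IPUPlace())
exe.run(startup_prog)

# The same IpuStrategy object carries the graph config and, in the new
# tests above, any extra options such as scaled_optimizer_state.
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=False)
program = paddle.static.IpuCompiledProgram(
    main_prog, ipu_strategy=ipu_strategy).compile(['in_0'], fetch_list)

feed = {'in_0': np.random.uniform(size=[1, 3, 10, 10]).astype(np.float32)}
result = exe.run(program, feed=feed, fetch_list=fetch_list)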
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_atol(self):
         self.atol = 3e-6
         self.rtol = 1e-5
@@ -52,67 +48,32 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {}

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                conv1 = paddle.static.nn.conv2d(
-                    x, num_filters=3, filter_size=3, bias_attr=False)
-                conv2 = paddle.static.nn.conv2d(
-                    conv1, num_filters=3, filter_size=3, bias_attr=False)
-                conv3 = paddle.static.nn.conv2d(
-                    conv2, num_filters=3, filter_size=3, bias_attr=False)
-                conv4 = paddle.static.nn.conv2d(
-                    conv3, num_filters=3, filter_size=3, bias_attr=False)
-
-                fetch_list = [conv4.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(
-                    is_training=self.is_training, micro_batch_size=2)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        conv1 = paddle.static.nn.conv2d(
+            x, num_filters=3, filter_size=3, bias_attr=False)
+        conv2 = paddle.static.nn.conv2d(
+            conv1, num_filters=3, filter_size=3, bias_attr=False)
+        conv3 = paddle.static.nn.conv2d(
+            conv2, num_filters=3, filter_size=3, bias_attr=False)
+        conv4 = paddle.static.nn.conv2d(
+            conv3, num_filters=3, filter_size=3, bias_attr=False)
+        self.fetch_list = [conv4.name]
+
+    def run_model(self, exec_mode):
+        ipu_strategy = paddle.static.IpuStrategy()
+        ipu_strategy.set_graph_config(
+            is_training=self.is_training, micro_batch_size=2)
+        self.run_op_test(exec_mode, ipu_strategy)

     def test(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode).flatten()
-        self.check(output_dict)
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()

 if __name__ == "__main__":
...
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
 import paddle.static
-from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
+from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


 @unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

-    @property
-    def fp16_enabled(self):
-        return True
-
     def set_data_feed(self):
         data = np.random.uniform(size=[4, 5, 6])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
@@ -51,59 +47,22 @@ class TestBase(IPUOpTest):
             "ends": [3, 2, 4],
         }

-    def _test_base(self, exec_mode):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with paddle.static.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-
-                out = paddle.fluid.layers.slice(x, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if exec_mode == ExecutionMode.CPU_FP32:
-                place = paddle.CPUPlace()
-            else:
-                place = paddle.IPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if exec_mode != ExecutionMode.CPU_FP32:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                if exec_mode == ExecutionMode.IPU_POPART_FP16:
-                    ipu_strategy.set_precision_config(enable_fp16=True)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            feed = self.feed_fp32
-            if exec_mode > ExecutionMode.IPU_FP32:
-                feed = self.feed_fp16
-
-            result = exe.run(program, feed=feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        output_dict = {}
-        for mode in ExecutionMode:
-            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
-                break
-            output_dict[mode] = self._test_base(mode)
-        self.check(output_dict, check_shape=True)
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        out = paddle.fluid.layers.slice(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+    def run_model(self, exec_mode):
+        self.run_op_test(exec_mode)
+
+    def test(self):
+        for m in IPUOpTest.ExecutionMode:
+            if not self.skip_mode(m):
+                self.build_model()
+                self.run_model(m)
+        self.check()

 class TestCase1(TestBase):
@@ -135,54 +94,17 @@ class TestCase2(TestBase):
     def set_op_attrs(self):
         self.attrs = {"axes": [0, 1, 2]}

-    def _test_base(self, run_ipu=True):
-        scope = fluid.core.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
-
-        with fluid.scope_guard(scope):
-            with paddle.static.program_guard(main_prog, startup_prog):
-                x = paddle.static.data(
-                    name=self.feed_list[0],
-                    shape=self.feed_shape[0],
-                    dtype='float32')
-                starts = paddle.static.data(
-                    name=self.feed_list[1],
-                    shape=self.feed_shape[1],
-                    dtype='int32')
-                ends = paddle.static.data(
-                    name=self.feed_list[2],
-                    shape=self.feed_shape[2],
-                    dtype='int32')
-                out = paddle.fluid.layers.slice(
-                    x, starts=starts, ends=ends, **self.attrs)
-
-                fetch_list = [out.name]
-
-            if run_ipu:
-                place = paddle.IPUPlace()
-            else:
-                place = paddle.CPUPlace()
-
-            exe = paddle.static.Executor(place)
-            exe.run(startup_prog)
-
-            if run_ipu:
-                feed_list = self.feed_list
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=self.is_training)
-                program = paddle.static.IpuCompiledProgram(
-                    main_prog,
-                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
-            else:
-                program = main_prog
-
-            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
-            return result[0]
-
-    def test_base(self):
-        pass
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(
+            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
+        starts = paddle.static.data(
+            name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
+        ends = paddle.static.data(
+            name=self.feed_list[2], shape=self.feed_shape[2], dtype='int32')
+        out = paddle.fluid.layers.slice(
+            x, starts=starts, ends=ends, **self.attrs)
+        self.fetch_list = [out.name]

 if __name__ == "__main__":
...
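Taken together, the diffs above migrate every file to one skeleton: build the graph under `@IPUOpTest.static_graph`, run it through `run_op_test`, and loop `test` over `IPUOpTest.ExecutionMode` with `skip_mode` and `check`. A minimal sketch of a freshly written test in that style, assuming the same `IPUOpTest` helpers (the relu op here is a stand-in, not taken from this commit):

import unittest

import numpy as np
import paddle
import paddle.nn.functional as F
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestRelu(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()

    def set_data_feed(self):
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    # static_graph supplies the fresh scope/program guards that every
    # deleted _test_base used to set up by hand.
    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
        out = F.relu(x)  # stand-in op for illustration
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        # run_op_test picks the place, compiles for IPU when needed, and
        # switches to the fp16 feed for the fp16 execution mode.
        self.run_op_test(exec_mode)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()


if __name__ == "__main__":
    unittest.main()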