Unverified commit 063a3509, authored by Allen Guo, committed via GitHub

update UTs 1 (#42517)

Parent commit: 63d4d05a
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import shutil
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Save/load round-trip test for IPU inference models.

    Trains a small scale+conv2d network on the IPU in fp16, saves it with
    ``paddle.static.save_inference_model``, then reloads the saved model and
    runs inference on both CPU (fp32) and IPU (fp16), asserting the two
    result sequences agree within the fp16 tolerances.
    """

    def setUp(self):
        # Standard IPUOpTest fixture setup: tolerances, feeds, feed metadata,
        # and per-test attributes.
        self.set_atol()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_atol(self):
        # fp32 tolerances (unused by test_base, which compares fp16 vs fp32
        # results) and the looser fp16 tolerances actually used in the check.
        self.atol = 1e-6
        self.rtol = 1e-5
        self.atol_fp16 = 1e-2
        self.rtol_fp16 = 1e-3

    def set_data_feed(self):
        # Same random sample in both precisions so CPU and IPU runs see
        # numerically equivalent inputs.
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}

    def set_feed_attr(self):
        # Derive static-graph input shapes and names from the fp32 feed.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_op_attrs(self):
        self.attrs = {}
        self.attrs['steps'] = 100
        # NOTE(review): 'save_at_step' is set but never read within this
        # class — presumably consumed by a subclass or kept for symmetry
        # with related tests; confirm before removing.
        self.attrs['save_at_step'] = 20
        self.attrs['is_training'] = True
        self.attrs['opt_type'] = 'sgd'
        # Saved model lands at '<path>/<model_name>'; 'path' is removed in
        # test_base's cleanup.
        self.attrs['path'] = 'model'
        self.attrs['model_name'] = 'test'

    def _test_save(self):
        """Build, train (fp16 on IPU) and save the inference model.

        Side effects: sets ``self.full_name`` (save location prefix) and
        writes the saved inference model to disk.
        """
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        # Seed both programs so repeated runs build identical weights.
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED
        # Fresh name generator keeps parameter names deterministic across
        # tests sharing the process.
        generator = paddle.fluid.unique_name.UniqueNameGenerator()
        self.full_name = '/'.join(
            [self.attrs['path'], self.attrs['model_name']])
        with paddle.fluid.unique_name.guard(generator):
            with paddle.static.scope_guard(scope):
                with paddle.static.program_guard(main_prog, startup_prog):
                    x = paddle.static.data(
                        name=self.feed_list[0],
                        shape=self.feed_shape[0],
                        dtype='float32')
                    # Identity scale (scale=1, bias=0) — keeps an extra op in
                    # the graph without changing values.
                    scale = paddle.fluid.layers.scale(
                        x, scale=1.0, bias=0.0, bias_after_scale=True)
                    conv = paddle.static.nn.conv2d(
                        scale,
                        num_filters=3,
                        filter_size=3,
                        bias_attr=False,
                        name='conv2d')
                    loss = paddle.mean(conv)
                    # Optimizer selected via attrs; only 'sgd' is exercised
                    # by the default set_op_attrs.
                    if self.attrs['is_training']:
                        if self.attrs['opt_type'] == 'sgd':
                            sgd = paddle.optimizer.SGD(learning_rate=1e-2)
                            sgd.minimize(loss)
                        elif self.attrs['opt_type'] == 'adam':
                            adam = paddle.optimizer.Adam(learning_rate=1e-2)
                            adam.minimize(loss)
                        elif self.attrs['opt_type'] == 'lamb':
                            lamb = paddle.optimizer.Lamb(learning_rate=1e-2)
                            lamb.minimize(loss)
                    fetch_list = [loss.name]
                    place = paddle.IPUPlace()
                    exe = paddle.static.Executor(place)
                    exe.run(startup_prog)
                    # Compile for IPU with training enabled and fp16
                    # precision (feeds below are the fp16 dict).
                    ipu_strategy = paddle.static.IpuStrategy()
                    ipu_strategy.set_graph_config(is_training=True)
                    ipu_strategy.set_precision_config(enable_fp16=True)
                    program = paddle.static.IpuCompiledProgram(
                        main_prog, ipu_strategy=ipu_strategy).compile(
                            self.feed_list, fetch_list)
                    for _ in range(self.attrs['steps']):
                        exe.run(program, feed=self.feed_fp16, fetch_list=fetch_list)
                    # Save from the original (uncompiled) program so the
                    # model is loadable on any place.
                    paddle.static.save_inference_model(
                        self.full_name, x, loss, exe, program=program.org_program)

    def _test_load(self, run_ipu):
        """Reload the saved model and run 10 inference steps.

        Args:
            run_ipu: True to compile and run on IPU with fp16 feeds;
                False to run the loaded program on CPU with fp32 feeds.

        Returns:
            np.ndarray stacking the 10 per-step fetch results.
        """
        if run_ipu:
            place = paddle.IPUPlace()
        else:
            place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        [inference_program, feed_target_names, fetch_targets] = (
            paddle.static.load_inference_model(self.full_name, exe))
        if run_ipu:
            feed_list = feed_target_names
            fetch_list = [fetch_targets[0].name]
            # Inference-mode IPU compile, fp16 to match how it was trained.
            ipu_strategy = paddle.static.IpuStrategy()
            ipu_strategy.set_graph_config(is_training=False)
            ipu_strategy.set_precision_config(enable_fp16=True)
            program = paddle.static.IpuCompiledProgram(
                inference_program,
                ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
        else:
            program = inference_program
        feed = self.feed_fp16 if run_ipu else self.feed_fp32
        result = []
        for i in range(10):
            # In-place mutation of the shared feed dict: each precision has
            # its own dict (feed_fp32 vs feed_fp16), and both runs apply the
            # same cumulative offsets, so the CPU/IPU comparison in
            # test_base sees matching input sequences.
            feed["in_0"] += np.array([1.1 * i]).astype(feed["in_0"].dtype)
            # NOTE(review): fetch_targets is already a list, so this passes
            # a nested list to exe.run — appears to rely on the Executor
            # accepting that; [fetch_targets[0]] would be clearer. Confirm.
            out = exe.run(program, feed=feed, fetch_list=[fetch_targets])
            result.append(out)
        return np.array(result)

    def test_base(self):
        # Train+save on IPU, then compare CPU-fp32 vs IPU-fp16 inference on
        # the reloaded model using the looser fp16 tolerances.
        self._test_save()
        cpu_res = self._test_load(False)
        ipu_res = self._test_load(True).astype(np.float32)
        self.assertTrue(
            np.allclose(
                cpu_res, ipu_res, rtol=self.rtol_fp16, atol=self.atol_fp16))
        # ignore_errors=True: best-effort cleanup of the saved model dir.
        shutil.rmtree(self.attrs['path'], True)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
...@@ -16,9 +16,8 @@ import unittest ...@@ -16,9 +16,8 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -31,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -31,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self): def set_atol(self):
self.atol = 5e-6 self.atol = 5e-6
self.rtol = 1e-5 self.rtol = 1e-5
...@@ -54,20 +49,10 @@ class TestBase(IPUOpTest): ...@@ -54,20 +49,10 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = fluid.core.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
conv1 = paddle.static.nn.conv2d( conv1 = paddle.static.nn.conv2d(
x, num_filters=3, filter_size=3, bias_attr=False) x, num_filters=3, filter_size=3, bias_attr=False)
conv2 = paddle.static.nn.conv2d( conv2 = paddle.static.nn.conv2d(
...@@ -76,58 +61,20 @@ class TestBase(IPUOpTest): ...@@ -76,58 +61,20 @@ class TestBase(IPUOpTest):
conv3 = paddle.static.nn.conv2d( conv3 = paddle.static.nn.conv2d(
add1, num_filters=8, filter_size=8, bias_attr=False) add1, num_filters=8, filter_size=8, bias_attr=False)
out = paddle.fluid.layers.relu(conv3, **self.attrs) out = paddle.fluid.layers.relu(conv3, **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name] def run_model(self, exec_mode):
self.run_op_test(exec_mode)
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list) def test(self):
return result[0] for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
def test_base(self): self.build_model()
output_dict = {} self.run_model(m)
for mode in ExecutionMode: self.check()
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestIntInput(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
class TestIntInput(TestBase):
def set_data_feed(self): def set_data_feed(self):
embedding = np.random.uniform(size=[10, 20]) embedding = np.random.uniform(size=[10, 20])
indice = np.array([1, 3, 5]).astype(np.int32) indice = np.array([1, 3, 5]).astype(np.int32)
...@@ -140,71 +87,14 @@ class TestIntInput(IPUOpTest): ...@@ -140,71 +87,14 @@ class TestIntInput(IPUOpTest):
"indice": indice, "indice": indice,
} }
def set_feed_attr(self): @IPUOpTest.static_graph
self.feed_shape = [x.shape for x in self.feed_fp32.values()] def build_model(self):
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = fluid.core.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
shape=self.feed_shape[1],
dtype='int32')
out = paddle.fluid.layers.gather(x, index=y) out = paddle.fluid.layers.gather(x, index=y)
self.fetch_list = [out.name]
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return np.array(result)
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
x = np.random.uniform(size=[10, 20]) x = np.random.uniform(size=[10, 20])
y = np.array([1, 3, 5]) y = np.array([1, 3, 5])
...@@ -47,63 +43,24 @@ class TestBase(IPUOpTest): ...@@ -47,63 +43,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
shape=self.feed_shape[1],
dtype='int32')
out = paddle.fluid.layers.gather(x, index=y, **self.attrs) out = paddle.fluid.layers.gather(x, index=y, **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name] def run_model(self, exec_mode):
self.run_op_test(exec_mode)
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten() self.check()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10]) data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)} self.feed_fp32 = {'in_0': data.astype(np.float32)}
...@@ -46,59 +42,22 @@ class TestBase(IPUOpTest): ...@@ -46,59 +42,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"approximate": False} self.attrs = {"approximate": False}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
out = paddle.fluid.layers.gelu(x, **self.attrs) out = paddle.fluid.layers.gelu(x, **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name] def run_model(self, exec_mode):
self.run_op_test(exec_mode)
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten() self.check()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -28,19 +28,26 @@ class TestBase(IPUOpTest): ...@@ -28,19 +28,26 @@ class TestBase(IPUOpTest):
self.set_data_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
self.set_attrs() self.set_attrs()
self.set_training()
@property
def fp16_enabled(self):
return False
def set_atol(self): def set_atol(self):
super().set_atol()
self.atol = 1e-6 self.atol = 1e-6
self.rtol = 1e-5
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed_fp32 = {
"image": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'), "image": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
} }
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()] self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self): def set_attrs(self):
self.attrs = { self.attrs = {
...@@ -48,21 +55,18 @@ class TestBase(IPUOpTest): ...@@ -48,21 +55,18 @@ class TestBase(IPUOpTest):
"weight_decay": 0.0, "weight_decay": 0.0,
} }
def _test_optimizer(self, run_ipu=True): def set_training(self):
scope = paddle.static.Scope() self.is_training = True
main_prog = paddle.static.Program() self.epoch = 100
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
np.random.seed(self.SEED)
with paddle.static.scope_guard(scope): @IPUOpTest.static_graph
with paddle.static.program_guard(main_prog, startup_prog): def build_model(self):
image = paddle.static.data( image = paddle.static.data(
name='image', shape=[1, 3, 10, 10], dtype='float32') name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
conv1 = paddle.static.nn.conv2d( conv1 = paddle.static.nn.conv2d(
image, num_filters=3, filter_size=3, bias_attr=False) image, num_filters=3, filter_size=3, bias_attr=False)
loss = paddle.mean(conv1) loss = paddle.mean(conv1)
self.fetch_list = [loss.name]
weight_decay = self.attrs['weight_decay'] weight_decay = self.attrs['weight_decay']
# Only support ClipGradByGlobalNorm # Only support ClipGradByGlobalNorm
...@@ -73,9 +77,7 @@ class TestBase(IPUOpTest): ...@@ -73,9 +77,7 @@ class TestBase(IPUOpTest):
grad_clip=clip) grad_clip=clip)
elif self.attrs['optimizer'] == 'adam': elif self.attrs['optimizer'] == 'adam':
opt = paddle.optimizer.Adam( opt = paddle.optimizer.Adam(
learning_rate=1e-1, learning_rate=1e-1, weight_decay=weight_decay, grad_clip=clip)
weight_decay=weight_decay,
grad_clip=clip)
elif self.attrs['optimizer'] == 'lamb': elif self.attrs['optimizer'] == 'lamb':
opt = paddle.optimizer.Lamb( opt = paddle.optimizer.Lamb(
learning_rate=1e-1, learning_rate=1e-1,
...@@ -83,41 +85,18 @@ class TestBase(IPUOpTest): ...@@ -83,41 +85,18 @@ class TestBase(IPUOpTest):
grad_clip=clip) grad_clip=clip)
else: else:
raise ValueError( raise ValueError(
f"Not supported optimizer {self.attrs['optimizer']} for test" f"Not supported optimizer {self.attrs['optimizer']} for test")
)
opt.minimize(loss) opt.minimize(loss)
if run_ipu: def run_model(self, exec_mode):
place = paddle.IPUPlace() self.run_op_test(exec_mode)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = [image.name]
fetch_list = [loss.name]
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=True)
program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy).compile(feed_list,
fetch_list)
else:
program = main_prog
result = []
for epoch in range(100):
loss_res = exe.run(program, feed=self.feed, fetch_list=[loss])
result.append(loss_res)
return np.array(result)
def test(self): def test(self):
# cpu and ipu dimenstion mismatch, cpu:(100, 1, 1), ipu:(100, 1) for m in IPUOpTest.ExecutionMode:
ipu_loss = self._test_optimizer(True).flatten() if not self.skip_mode(m):
cpu_loss = self._test_optimizer(False).flatten() self.build_model()
self.run_model(m)
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=self.atol)) self.check()
class TestAdam(TestBase): class TestAdam(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -28,73 +28,30 @@ class TestGreaterThan(IPUOpTest): ...@@ -28,73 +28,30 @@ class TestGreaterThan(IPUOpTest):
self.set_training() self.set_training()
self.set_test_op() self.set_test_op()
@property
def fp16_enabled(self):
return True
def set_test_op(self): def set_test_op(self):
self.op = paddle.fluid.layers.greater_than self.op = paddle.fluid.layers.greater_than
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
shape=self.feed_shape[1],
dtype='float32')
out = self.op(x, y, **self.attrs) out = self.op(x, y, **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name] def run_model(self, exec_mode):
self.run_op_test(exec_mode)
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def run_test_base(self): def run_test_base(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten().astype(np.int32) self.check()
self.check(output_dict)
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self): def set_atol(self):
self.atol = 3e-6 self.atol = 3e-6
self.rtol = 1e-6 self.rtol = 1e-6
...@@ -56,20 +52,10 @@ class TestBase(IPUOpTest): ...@@ -56,20 +52,10 @@ class TestBase(IPUOpTest):
"data_layout": 'NCHW', "data_layout": 'NCHW',
} }
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
if self.is_training: if self.is_training:
ch = self.feed_shape[0][1] ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d( conv1 = paddle.static.nn.conv2d(
...@@ -81,61 +67,21 @@ class TestBase(IPUOpTest): ...@@ -81,61 +67,21 @@ class TestBase(IPUOpTest):
loss = paddle.mean(out) loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2) adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss) adam.minimize(loss)
self.fetch_list = [loss.name]
else: else:
out = paddle.fluid.layers.nn.group_norm( out = paddle.fluid.layers.nn.group_norm(
x, param_attr=True, bias_attr=True, **self.attrs) x, param_attr=True, bias_attr=True, **self.attrs)
self.fetch_list = [out.name]
if self.is_training: def run_model(self, exec_mode):
fetch_list = [loss.name] self.run_op_test(exec_mode)
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
if mode > ExecutionMode.IPU_FP32 and self.is_training:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase): class TestCase1(TestBase):
...@@ -150,7 +96,7 @@ class TestCase1(TestBase): ...@@ -150,7 +96,7 @@ class TestCase1(TestBase):
class TestTrainCase1(TestBase): class TestTrainCase1(TestBase):
def set_training(self): def set_training(self):
self.is_training = True self.is_training = True
self.epoch = 10 self.epoch = 20
@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel") @unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
...@@ -170,7 +116,7 @@ class TestTrainCase2(TestBase): ...@@ -170,7 +116,7 @@ class TestTrainCase2(TestBase):
def set_training(self): def set_training(self):
self.is_training = True self.is_training = True
self.epoch = 10 self.epoch = 20
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self): def set_atol(self):
self.atol = 1e-6 self.atol = 1e-6
self.rtol = 1e-5 self.rtol = 1e-5
...@@ -52,19 +48,10 @@ class TestBase(IPUOpTest): ...@@ -52,19 +48,10 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"epsilon": 1e-05} self.attrs = {"epsilon": 1e-05}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
if self.is_training: if self.is_training:
ch = self.feed_shape[0][1] ch = self.feed_shape[0][1]
...@@ -77,61 +64,21 @@ class TestBase(IPUOpTest): ...@@ -77,61 +64,21 @@ class TestBase(IPUOpTest):
loss = paddle.mean(out) loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2) adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss) adam.minimize(loss)
self.fetch_list = [loss.name]
else: else:
out = paddle.fluid.layers.nn.instance_norm( out = paddle.fluid.layers.nn.instance_norm(
x, param_attr=True, bias_attr=True, **self.attrs) x, param_attr=True, bias_attr=True, **self.attrs)
self.fetch_list = [out.name]
if self.is_training: def run_model(self, exec_mode):
fetch_list = [loss.name] self.run_op_test(exec_mode)
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res)
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
if mode > ExecutionMode.IPU_FP32 and self.is_training: self.check()
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestTrainCase1(TestBase): class TestTrainCase1(TestBase):
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
# The IPU shard/pipeline guards below annotate ops on the static graph, so
# the whole module must run in static-graph mode.
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestIpuShard(unittest.TestCase):
    """Checks that ``paddle.static.ipu_shard_guard(index=...)`` stamps the
    ``ipu_index`` attribute onto exactly the ops created inside the guard.
    """

    def _test(self):
        """Build a small graph with ops under various shard guards and return
        the ``ipu_index`` values collected from the default main program, in
        op-creation order.
        """
        # build graph
        # NOTE(review): the dump this was recovered from lost indentation; the
        # guard blocks are reconstructed as sequential (non-nested) `with`
        # statements — the inline `ipu_index` comments and the expected list
        # [1, 2, 3, 1, 2, 1, 2] are consistent with that reading, but confirm
        # against the original file.
        a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
        b = a + 2  # scale : scale * x + bias, ipu_index : no
        with paddle.static.ipu_shard_guard(index=1):
            c = b + 1  # scale, ipu_index : 1
        with paddle.static.ipu_shard_guard(index=2):
            d = c * 2  # scale, ipu_index : 2
        with paddle.static.ipu_shard_guard(index=3):
            e = d + 3  # scale, ipu_index : 3
        with paddle.static.ipu_shard_guard(index=1):
            e = e + 3  # scale, ipu_index : 1
        with paddle.static.ipu_shard_guard(index=2):
            e = e + 3  # scale, ipu_index : 2
        with paddle.static.ipu_shard_guard(index=1):
            f = paddle.tensor.pow(e, 2.0)  # pow, ipu_index : 1
        with paddle.static.ipu_shard_guard(index=2):
            g = f - 1  # scale, ipu_index : 2
        h = g + 1  # scale, ipu_index : no

        # Collect ipu_index from every op that carries the attribute; ops
        # created outside any guard (b and h above) are skipped.
        ipu_index_list = []
        main_prog = paddle.static.default_main_program()
        for op in main_prog.global_block().ops:
            if op.desc.has_attr("ipu_index"):
                ipu_index_list.append(op.desc.attr("ipu_index"))
        return ipu_index_list

    def test_ipu_shard(self):
        """Indices must appear in op-creation order, one per guarded op."""
        ipu_index_list = self._test()
        expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
        # atol=0 makes allclose an exact integer comparison.
        self.assertTrue(
            np.allclose(
                ipu_index_list, expected_ipu_index_list, atol=0))
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestIpuPipeline(unittest.TestCase):
    """Checks that ``paddle.static.ipu_shard_guard(stage=...)`` stamps the
    ``ipu_stage`` attribute onto exactly the ops created inside the guard.
    Mirrors TestIpuShard but for the pipeline-stage attribute.
    """

    def _test(self):
        """Build a small graph with ops under various stage guards and return
        the ``ipu_stage`` values collected from the default main program, in
        op-creation order.
        """
        # build graph
        # NOTE(review): indentation was lost in the dump this is recovered
        # from; guards reconstructed as sequential `with` blocks (matches the
        # inline ipu_stage comments and the expected list) — confirm against
        # the original file.
        a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
        b = a + 2  # scale : scale * x + bias, ipu_stage : no
        with paddle.static.ipu_shard_guard(stage=1):
            c = b + 1  # scale, ipu_stage : 1
        with paddle.static.ipu_shard_guard(stage=2):
            d = c * 2  # scale, ipu_stage : 2
        with paddle.static.ipu_shard_guard(stage=3):
            e = d + 3  # scale, ipu_stage : 3
        with paddle.static.ipu_shard_guard(stage=1):
            e = e + 3  # scale, ipu_stage : 1
        with paddle.static.ipu_shard_guard(stage=2):
            e = e + 3  # scale, ipu_stage : 2
        with paddle.static.ipu_shard_guard(stage=1):
            f = paddle.tensor.pow(e, 2.0)  # pow, ipu_stage : 1
        with paddle.static.ipu_shard_guard(stage=2):
            g = f - 1  # scale, ipu_stage : 2
        h = g + 1  # scale, ipu_stage : no

        # Collect ipu_stage from every op that carries the attribute.
        # NOTE(review): the local name `ipu_index_list` and the test method
        # name `test_ipu_shard` look copy-pasted from TestIpuShard; consider
        # renaming to `ipu_stage_list` / `test_ipu_pipeline` in a follow-up
        # (names kept unchanged here).
        ipu_index_list = []
        main_prog = paddle.static.default_main_program()
        for op in main_prog.global_block().ops:
            if op.desc.has_attr("ipu_stage"):
                ipu_index_list.append(op.desc.attr("ipu_stage"))
        return ipu_index_list

    def test_ipu_shard(self):
        """Stages must appear in op-creation order, one per guarded op."""
        ipu_index_list = self._test()
        expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
        # atol=0 makes allclose an exact integer comparison.
        self.assertTrue(
            np.allclose(
                ipu_index_list, expected_ipu_index_list, atol=0))
# Standard unittest entry point: discover and run the test classes above.
if __name__ == "__main__":
    unittest.main()
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self): def set_atol(self):
self.atol = 1e-6 self.atol = 1e-6
self.rtol = 1e-5 self.rtol = 1e-5
...@@ -59,20 +55,10 @@ class TestBase(IPUOpTest): ...@@ -59,20 +55,10 @@ class TestBase(IPUOpTest):
} }
self.optimizer = None self.optimizer = None
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
if self.is_training: if self.is_training:
ch = self.feed_shape[0][1] ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d( conv1 = paddle.static.nn.conv2d(
...@@ -81,14 +67,14 @@ class TestBase(IPUOpTest): ...@@ -81,14 +67,14 @@ class TestBase(IPUOpTest):
bias = paddle.ParamAttr(trainable=True) bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.layer_norm( out = paddle.fluid.layers.nn.layer_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs) conv1, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
self.fetch_list = [loss.name]
else: else:
scale = self.attrs['scale'] scale = self.attrs['scale']
bias = self.attrs['shift'] bias = self.attrs['shift']
out = paddle.fluid.layers.nn.layer_norm( out = paddle.fluid.layers.nn.layer_norm(
x, param_attr=scale, bias_attr=bias, **self.attrs) x, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out) self.fetch_list = [out.name]
fetch_list = [loss.name]
if self.is_training: if self.is_training:
optimizer = None optimizer = None
...@@ -102,46 +88,15 @@ class TestBase(IPUOpTest): ...@@ -102,46 +88,15 @@ class TestBase(IPUOpTest):
if optimizer is not None: if optimizer is not None:
optimizer.minimize(loss) optimizer.minimize(loss)
if exec_mode: def run_model(self, exec_mode):
place = paddle.IPUPlace() self.run_op_test(exec_mode)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=self.feed_fp32,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program,
feed=self.feed_fp32,
fetch_list=fetch_list)
return result[0]
def test_base(self): def test(self):
res0 = self._test_base(False) for m in IPUOpTest.ExecutionMode:
res1 = self._test_base(True) if not self.skip_mode(m):
self.build_model()
self.assertTrue( self.run_model(m)
np.allclose( self.check()
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
@unittest.skip('raise error') @unittest.skip('raise error')
...@@ -188,33 +143,17 @@ class TestTrainCase1(TestBase): ...@@ -188,33 +143,17 @@ class TestTrainCase1(TestBase):
self.optimizer = 'sgd' self.optimizer = 'sgd'
def set_atol(self): def set_atol(self):
super().set_atol()
self.atol = 1e-6 self.atol = 1e-6
def set_training(self): def set_training(self):
self.is_training = True self.is_training = True
self.epoch = 10 self.epoch = 20
class TestTrainCase2(TestBase):
def set_atol(self):
self.atol = 5e-4
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 2,
"epsilon": 1e-05
}
self.optimizer = 'adam'
def set_training(self):
self.is_training = True
self.epoch = 10
class TestTrainCase3(TestBase): class TestTrainCase3(TestBase):
def set_atol(self): def set_atol(self):
super().set_atol()
self.atol = 5e-3 self.atol = 5e-3
def set_op_attrs(self): def set_op_attrs(self):
...@@ -228,7 +167,7 @@ class TestTrainCase3(TestBase): ...@@ -228,7 +167,7 @@ class TestTrainCase3(TestBase):
def set_training(self): def set_training(self):
self.is_training = True self.is_training = True
self.epoch = 10 self.epoch = 20
# not support `layer_norm(x, param_attr=False, bias_attr=False, **self.attrs)` # not support `layer_norm(x, param_attr=False, bias_attr=False, **self.attrs)`
......
...@@ -18,7 +18,7 @@ import numpy as np ...@@ -18,7 +18,7 @@ import numpy as np
import paddle import paddle
import paddle.nn.functional as F import paddle.nn.functional as F
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -31,10 +31,6 @@ class TestBase(IPUOpTest): ...@@ -31,10 +31,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10]) data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)} self.feed_fp32 = {'in_0': data.astype(np.float32)}
...@@ -49,59 +45,22 @@ class TestBase(IPUOpTest): ...@@ -49,59 +45,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"axis": -1} self.attrs = {"axis": -1}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
out = F.log_softmax(x, **self.attrs) out = F.log_softmax(x, **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name] def run_model(self, exec_mode):
self.run_op_test(exec_mode)
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).flatten() self.check()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -29,68 +29,32 @@ class TestBase(IPUOpTest): ...@@ -29,68 +29,32 @@ class TestBase(IPUOpTest):
self.set_data_feed() self.set_data_feed()
self.set_feed_attr() self.set_feed_attr()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.random.uniform(size=[2, 20, 30528]) data = np.random.uniform(size=[2, 20, 30528])
self.feed = {"in_0": data.astype('bool')} self.feed_fp32 = {"in_0": data.astype('bool')}
self.feed_fp16 = {"in_0": data.astype('bool')}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()] self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope): @IPUOpTest.static_graph
with paddle.static.program_guard(main_prog, startup_prog): def build_model(self):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype="bool")
shape=self.feed_shape[0],
dtype="bool")
out = paddle.fluid.layers.logical_not(x) out = paddle.fluid.layers.logical_not(x)
self.fetch_list = [out.name]
fetch_list = [out.name] def run_model(self, exec_mode):
self.run_op_test(exec_mode)
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).astype(np.int32)
self.check(output_dict, check_shape=True) def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -38,15 +38,8 @@ class TestLogicalAnd(IPUOpTest): ...@@ -38,15 +38,8 @@ class TestLogicalAnd(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {} self.attrs = {}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
...@@ -55,52 +48,28 @@ class TestLogicalAnd(IPUOpTest): ...@@ -55,52 +48,28 @@ class TestLogicalAnd(IPUOpTest):
name=self.feed_list[1], name=self.feed_list[1],
shape=self.feed_shape[1], shape=self.feed_shape[1],
dtype=self.feed_dtype[1]) dtype=self.feed_dtype[1])
out = self.op(x, y, **self.attrs) out = self.op(x, y, **self.attrs)
self.fetch_list = [out.name]
fetch_list = [out.name] def run_model(self, exec_mode):
self.run_op_test(exec_mode)
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
def run_test_base(self): def run_test_base(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled: self.build_model()
break self.run_model(m)
output_dict[mode] = self._test_base(mode).astype(np.int32) self.check()
self.check(output_dict, check_shape=True)
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = ['bool', 'bool'] self.feed_dtype = ['bool', 'bool']
def set_data_feed0(self): def set_data_feed0(self):
x = np.random.choice([True, False], size=(1, 3, 5, 5)) x = np.random.choice([True, False], size=(1, 3, 5, 5))
y = np.random.choice([True, False], size=(1, 3, 5, 5)) y = np.random.choice([True, False], size=(1, 3, 5, 5))
self.feed = { self.feed_fp32 = {
"x": x.astype('bool'), "x": x.astype('bool'),
"y": y.astype('bool'), "y": y.astype('bool'),
} }
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,19 +30,15 @@ class TestBase(IPUOpTest): ...@@ -30,19 +30,15 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
data = np.array([[[1], [3]], [[2], [4]], [[4], [127]]]) data = np.array([[[1], [3]], [[2], [4]], [[4], [127]]])
self.feed_cpu = {"x": data.astype(np.int64)} self.feed_fp32 = {"x": data.astype(np.int64)}
self.feed_ipu = {"x": data.astype(np.int32)} self.feed_fp16 = {"x": data.astype(np.int32)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_cpu.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_cpu.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_cpu.values()] self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
...@@ -53,76 +49,30 @@ class TestBase(IPUOpTest): ...@@ -53,76 +49,30 @@ class TestBase(IPUOpTest):
"dtype": 'float32' "dtype": 'float32'
} }
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64')
shape=self.feed_shape[0],
dtype='int64')
out = paddle.fluid.layers.embedding(x, **self.attrs) out = paddle.fluid.layers.embedding(x, **self.attrs)
if self.is_training: if self.is_training:
loss = paddle.mean(out) loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2) adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss) adam.minimize(loss)
fetch_list = [loss.name] self.fetch_list = [loss.name]
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else: else:
program = main_prog self.fetch_list = [out.name]
feed = self.feed_cpu def run_model(self, exec_mode):
if exec_mode > ExecutionMode.CPU_FP32: if self.is_ipu_mode(exec_mode):
feed = self.feed_ipu self.feed_fp32['x'] = self.feed_fp32['x'].astype(np.int32)
self.run_op_test(exec_mode)
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and (not self.fp16_enabled or self.build_model()
self.is_training): self.run_model(m)
break self.check()
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestTrainCase1(TestBase): class TestTrainCase1(TestBase):
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,19 +30,15 @@ class TestBase(IPUOpTest): ...@@ -30,19 +30,15 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
x = np.array([[[1], [3]], [[2], [4]], [[4], [127]]]) x = np.array([[[1], [3]], [[2], [4]], [[4], [127]]])
self.feed_cpu = {"x": x.astype(np.int64)} self.feed_fp32 = {"x": x.astype(np.int64)}
self.feed_ipu = {"x": x.astype(np.int32)} self.feed_fp16 = {"x": x.astype(np.int32)}
def set_feed_attr(self): def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_cpu.values()] self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_cpu.keys()) self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_cpu.values()] self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = { self.attrs = {
...@@ -53,76 +49,31 @@ class TestBase(IPUOpTest): ...@@ -53,76 +49,31 @@ class TestBase(IPUOpTest):
"weight_attr": None "weight_attr": None
} }
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64')
shape=self.feed_shape[0],
dtype='int64')
embedding = paddle.nn.Embedding(**self.attrs) embedding = paddle.nn.Embedding(**self.attrs)
out = embedding(x) out = embedding(x)
if self.is_training: if self.is_training:
loss = paddle.mean(out) loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2) adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss) adam.minimize(loss)
fetch_list = [loss.name] self.fetch_list = [loss.name]
else: else:
fetch_list = [out.name] self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32: def run_model(self, exec_mode):
place = paddle.CPUPlace() if self.is_ipu_mode(exec_mode):
else: self.feed_fp32['x'] = self.feed_fp32['x'].astype(np.int32)
place = paddle.IPUPlace() self.run_op_test(exec_mode)
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_cpu
if exec_mode > ExecutionMode.CPU_FP32:
feed = self.feed_ipu
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test(self): def test(self):
output_dict = {} for m in IPUOpTest.ExecutionMode:
for mode in ExecutionMode: if not self.skip_mode(m):
if mode > ExecutionMode.IPU_FP32 and (not self.fp16_enabled or self.build_model()
self.is_training): self.run_model(m)
break self.check()
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestTrainCase1(TestBase): class TestTrainCase1(TestBase):
......
...@@ -12,89 +12,75 @@ ...@@ -12,89 +12,75 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function
import numpy as np import numpy as np
import unittest import unittest
import sys
import paddle import paddle
import paddle.fluid as fluid
import paddle.static import paddle.static
from paddle.optimizer.lr import LRScheduler from paddle.optimizer.lr import LRScheduler
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
paddle.enable_static()
SEED = 2021
class LR_New(LRScheduler): class LR_New(LRScheduler):
def __init__(self, learning_rate=1.0, last_epoch=-1, verbose=False): def __init__(self, learning_rate=1e-5, last_epoch=-1, verbose=False):
super(LR_New, self).__init__(learning_rate, last_epoch, verbose) super(LR_New, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self): def get_lr(self):
self.base_lr = self.base_lr + 1 self.base_lr = self.base_lr + 1e-4
self.last_epoch = self.last_epoch + 1 self.last_epoch = self.last_epoch + 1
return self.base_lr return self.base_lr
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU") "core is not compiled with IPU")
class TestConvNet(unittest.TestCase): class TestConvNet(IPUOpTest):
def _test(self, run_ipu=True): @IPUOpTest.static_graph
scope = fluid.core.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)
with fluid.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data( image = paddle.static.data(
name='image', shape=[1, 3, 10, 10], dtype='float32') name='image', shape=[1, 3, 10, 10], dtype='float32')
conv1 = paddle.static.nn.conv2d( conv1 = paddle.static.nn.conv2d(
image, num_filters=3, filter_size=3, bias_attr=False) image, num_filters=3, filter_size=3, bias_attr=False)
loss = paddle.mean(conv1) loss = paddle.mean(conv1)
sgd = paddle.optimizer.SGD(learning_rate=LR_New()) opt = paddle.optimizer.Lamb(learning_rate=LR_New())
sgd.minimize(loss) opt.minimize(loss)
self.feed_list = [image.name]
self.fetch_list = [loss.name]
def run_model(self, run_ipu=True):
self.build_model()
if run_ipu: if run_ipu:
place = paddle.IPUPlace() place = paddle.IPUPlace()
else: else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(self.startup_prog)
if run_ipu: if run_ipu:
feed_list = [image.name]
fetch_list = [loss.name]
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=True) ipu_strategy.set_graph_config(is_training=True)
program = paddle.static.IpuCompiledProgram( program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy).compile(feed_list, self.main_prog, ipu_strategy=ipu_strategy).compile(
fetch_list) self.feed_list, self.fetch_list)
else: else:
program = main_prog program = self.main_prog
result = [] result = []
for epoch in range(100): for _ in range(100):
if hasattr(program, "lr_sheduler"): if hasattr(program, "lr_sheduler"):
program.lr_sheduler.step() program.lr_sheduler.step()
loss_res = exe.run(program, loss_res = exe.run(program,
feed={image.name: np_image}, feed=self.feed,
fetch_list=[loss]) fetch_list=self.fetch_list)
result.append(loss_res) result.append(loss_res)
return np.array(result) return np.array(result)
def test_training(self): def test_training(self):
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
self.feed = {'image': data}
# cpu and ipu dimenstion mismatch, cpu:(100, 1, 1), ipu:(100, 1) # cpu and ipu dimenstion mismatch, cpu:(100, 1, 1), ipu:(100, 1)
ipu_loss = self._test(True).flatten() ipu_loss = self.run_model(True).flatten()
cpu_loss = self._test(False).flatten() cpu_loss = self.run_model(False).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-4)) self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-10))
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
x = np.random.uniform(size=[20, 30]) x = np.random.uniform(size=[20, 30])
y = np.random.uniform(size=[30, 20]) y = np.random.uniform(size=[30, 20])
...@@ -52,63 +48,25 @@ class TestBase(IPUOpTest): ...@@ -52,63 +48,25 @@ class TestBase(IPUOpTest):
"alpha": 1.0, "alpha": 1.0,
} }
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
shape=self.feed_shape[1],
dtype='float32')
out = paddle.fluid.layers.matmul(x, y, **self.attrs) out = paddle.fluid.layers.matmul(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
fetch_list = [out.name] def test(self):
for m in IPUOpTest.ExecutionMode:
if exec_mode == ExecutionMode.CPU_FP32: if not self.skip_mode(m):
place = paddle.CPUPlace() self.build_model()
else: self.run_model(m)
place = paddle.IPUPlace() self.check()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
...@@ -26,7 +26,7 @@ def set_serialize_factor(serialize_factor): ...@@ -26,7 +26,7 @@ def set_serialize_factor(serialize_factor):
op._set_attr('serialize_factor', serialize_factor) op._set_attr('serialize_factor', serialize_factor)
@unittest.skipIf(not paddle.is_compiled_with_ipu() or IPUOpTest.use_ipumodel(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU") "core is not compiled with IPU")
class TestBase(IPUOpTest): class TestBase(IPUOpTest):
def setUp(self): def setUp(self):
...@@ -38,8 +38,8 @@ class TestBase(IPUOpTest): ...@@ -38,8 +38,8 @@ class TestBase(IPUOpTest):
def set_data_feed(self): def set_data_feed(self):
self.feed = { self.feed = {
"x": np.random.uniform(size=[2048, 3072]).astype('float32'), "x": np.random.uniform(size=[16, 32]).astype('float32'),
"y": np.random.uniform(size=[3072, 2048]).astype('float32'), "y": np.random.uniform(size=[32, 16]).astype('float32'),
} }
def set_feed_attr(self): def set_feed_attr(self):
...@@ -50,15 +50,8 @@ class TestBase(IPUOpTest): ...@@ -50,15 +50,8 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"transpose_x": False, "transpose_y": False} self.attrs = {"transpose_x": False, "transpose_y": False}
def _test_base(self, run_ipu=True): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0],
shape=self.feed_shape[0], shape=self.feed_shape[0],
...@@ -67,41 +60,37 @@ class TestBase(IPUOpTest): ...@@ -67,41 +60,37 @@ class TestBase(IPUOpTest):
name=self.feed_list[1], name=self.feed_list[1],
shape=self.feed_shape[1], shape=self.feed_shape[1],
dtype=self.feed_dtype[1]) dtype=self.feed_dtype[1])
# decrator maybe the best choice, but need to modify api # decrator maybe the best choice, but need to modify api
out = paddle.matmul(x, y, **self.attrs) out = paddle.matmul(x, y, **self.attrs)
set_serialize_factor(4) set_serialize_factor(4)
self.fetch_list = [out.name]
fetch_list = [out.name] def run_model(self, run_ipu):
self.build_model()
if run_ipu: if run_ipu:
place = paddle.IPUPlace() place = paddle.IPUPlace()
else: else:
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(self.startup_prog)
if run_ipu: if run_ipu:
feed_list = self.feed_list feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy() ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training) ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram( program = paddle.static.IpuCompiledProgram(
main_prog, self.main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list) ipu_strategy=ipu_strategy).compile(feed_list, self.fetch_list)
else: else:
program = main_prog program = self.main_prog
result = exe.run(program, feed=self.feed, fetch_list=self.fetch_list)
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0] return result[0]
def test_base(self): def test_base(self):
res0 = self._test_base(False) res0 = self.run_model(False)
res1 = self._test_base(True) res1 = self.run_model(True)
self.assertTrue( self.assertTrue(
np.allclose( np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol)) res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape) self.assertTrue(res0.shape == res1.shape)
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(), @unittest.skipIf(not paddle.is_compiled_with_ipu(),
...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest): ...@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr() self.set_feed_attr()
self.set_op_attrs() self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self): def set_data_feed(self):
x = np.random.uniform(size=[2, 3]) x = np.random.uniform(size=[2, 3])
y = np.random.uniform(size=[3, 2]) y = np.random.uniform(size=[3, 2])
...@@ -48,63 +44,24 @@ class TestBase(IPUOpTest): ...@@ -48,63 +44,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self): def set_op_attrs(self):
self.attrs = {"transpose_x": False, "transpose_y": False} self.attrs = {"transpose_x": False, "transpose_y": False}
def _test_base(self, exec_mode): @IPUOpTest.static_graph
scope = paddle.static.Scope() def build_model(self):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data( x = paddle.static.data(
name=self.feed_list[0], name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data( y = paddle.static.data(
name=self.feed_list[1], name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
shape=self.feed_shape[1],
dtype='float32')
out = paddle.matmul(x, y, **self.attrs) out = paddle.matmul(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
fetch_list = [out.name] def test(self):
for m in IPUOpTest.ExecutionMode:
if exec_mode == ExecutionMode.CPU_FP32: if not self.skip_mode(m):
place = paddle.CPUPlace() self.build_model()
else: self.run_model(m)
place = paddle.IPUPlace() self.check()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestCase1(TestBase): class TestCase1(TestBase):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册