Unverified commit 063a3509, authored by Allen Guo, committed by GitHub

update UTs 1 (#42517)

Parent commit: 63d4d05a
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import shutil
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_atol(self):
self.atol = 1e-6
self.rtol = 1e-5
self.atol_fp16 = 1e-2
self.rtol_fp16 = 1e-3
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['is_training'] = True
self.attrs['opt_type'] = 'sgd'
self.attrs['path'] = 'model'
self.attrs['model_name'] = 'test'
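# _test_save trains a small scale+conv model for attrs['steps'] iterations
# with FP16 enabled on the IPU, then exports the inference graph with
# paddle.static.save_inference_model under attrs['path'].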
def _test_save(self):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
generator = paddle.fluid.unique_name.UniqueNameGenerator()
self.full_name = '/'.join(
[self.attrs['path'], self.attrs['model_name']])
with paddle.fluid.unique_name.guard(generator):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
scale = paddle.fluid.layers.scale(
x, scale=1.0, bias=0.0, bias_after_scale=True)
conv = paddle.static.nn.conv2d(
scale,
num_filters=3,
filter_size=3,
bias_attr=False,
name='conv2d')
loss = paddle.mean(conv)
if self.attrs['is_training']:
if self.attrs['opt_type'] == 'sgd':
sgd = paddle.optimizer.SGD(learning_rate=1e-2)
sgd.minimize(loss)
elif self.attrs['opt_type'] == 'adam':
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
elif self.attrs['opt_type'] == 'lamb':
lamb = paddle.optimizer.Lamb(learning_rate=1e-2)
lamb.minimize(loss)
fetch_list = [loss.name]
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=True)
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy).compile(
self.feed_list, fetch_list)
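# compile() lowers the program for the IPU; the untouched source Program
# stays reachable as program.org_program, which is what is serialized below.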
for _ in range(self.attrs['steps']):
exe.run(program, feed=self.feed_fp16, fetch_list=fetch_list)
paddle.static.save_inference_model(
self.full_name, x, loss, exe, program=program.org_program)
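# _test_load restores the exported inference model and runs it ten times,
# on the IPU (recompiled, FP16 feed) or on the CPU (FP32 feed), returning
# the stacked outputs so test_base can compare the two back ends.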
def _test_load(self, run_ipu):
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
[inference_program, feed_target_names, fetch_targets] = (
paddle.static.load_inference_model(self.full_name, exe))
if run_ipu:
feed_list = feed_target_names
fetch_list = [fetch_targets[0].name]
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=False)
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
inference_program,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = inference_program
feed = self.feed_fp16 if run_ipu else self.feed_fp32
result = []
for i in range(10):
feed["in_0"] += np.array([1.1 * i]).astype(feed["in_0"].dtype)
out = exe.run(program, feed=feed, fetch_list=[fetch_targets])
result.append(out)
return np.array(result)
def test_base(self):
self._test_save()
cpu_res = self._test_load(False)
ipu_res = self._test_load(True).astype(np.float32)
self.assertTrue(
np.allclose(
cpu_res, ipu_res, rtol=self.rtol_fp16, atol=self.atol_fp16))
shutil.rmtree(self.attrs['path'], True)
if __name__ == "__main__":
unittest.main()
@@ -16,9 +16,8 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -31,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self):
self.atol = 5e-6
self.rtol = 1e-5
@@ -54,80 +49,32 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = fluid.core.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
conv1 = paddle.static.nn.conv2d(
x, num_filters=3, filter_size=3, bias_attr=False)
conv2 = paddle.static.nn.conv2d(
x, num_filters=3, filter_size=3, bias_attr=False)
add1 = conv1 + conv2
conv3 = paddle.static.nn.conv2d(
add1, num_filters=8, filter_size=8, bias_attr=False)
out = paddle.fluid.layers.relu(conv3, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
class TestIntInput(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
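# IPUOpTest.static_graph (used below) appears to wrap build_model in fresh
# main/startup programs, exposing them as self.main_prog and
# self.startup_prog (see the LR-scheduler test later in this commit), so a
# test only has to declare its network.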
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
conv1 = paddle.static.nn.conv2d(
x, num_filters=3, filter_size=3, bias_attr=False)
conv2 = paddle.static.nn.conv2d(
x, num_filters=3, filter_size=3, bias_attr=False)
add1 = conv1 + conv2
conv3 = paddle.static.nn.conv2d(
add1, num_filters=8, filter_size=8, bias_attr=False)
out = paddle.fluid.layers.relu(conv3, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
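# The refactored tests share this shape: iterate over IPUOpTest.ExecutionMode
# (CPU FP32, IPU FP32 and IPU FP16 in the old enum), let skip_mode() drop the
# modes a test cannot run, rebuild the graph per mode, execute it through
# run_op_test, and finally compare the collected outputs with check().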
class TestIntInput(TestBase):
def set_data_feed(self):
embedding = np.random.uniform(size=[10, 20])
indice = np.array([1, 3, 5]).astype(np.int32)
@@ -140,71 +87,14 @@ class TestIntInput(IPUOpTest):
"indice": indice,
}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = fluid.core.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='int32')
out = paddle.fluid.layers.gather(x, index=y)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return np.array(result)
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
out = paddle.fluid.layers.gather(x, index=y)
self.fetch_list = [out.name]
if __name__ == "__main__":
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.random.uniform(size=[10, 20])
y = np.array([1, 3, 5])
@@ -47,63 +43,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='int32')
out = paddle.fluid.layers.gather(x, index=y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
out = paddle.fluid.layers.gather(x, index=y, **self.attrs)
self.fetch_list = [out.name]
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
@@ -46,59 +42,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"approximate": False}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
out = paddle.fluid.layers.gelu(x, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.gelu(x, **self.attrs)
self.fetch_list = [out.name]
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
@@ -28,19 +28,26 @@ class TestBase(IPUOpTest):
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
self.set_training()
@property
def fp16_enabled(self):
return False
def set_atol(self):
super().set_atol()
self.atol = 1e-6
self.rtol = 1e-5
def set_data_feed(self):
self.feed = {
self.feed_fp32 = {
"image": np.random.uniform(size=[1, 3, 10, 10]).astype('float32'),
}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
self.attrs = {
@@ -48,76 +55,48 @@ class TestBase(IPUOpTest):
"weight_decay": 0.0,
}
def _test_optimizer(self, run_ipu=True):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
np.random.seed(self.SEED)
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data(
name='image', shape=[1, 3, 10, 10], dtype='float32')
conv1 = paddle.static.nn.conv2d(
image, num_filters=3, filter_size=3, bias_attr=False)
loss = paddle.mean(conv1)
weight_decay = self.attrs['weight_decay']
# Only support ClipGradByGlobalNorm
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
if self.attrs['optimizer'] == 'sgd':
opt = paddle.optimizer.SGD(learning_rate=1e-1,
weight_decay=weight_decay,
grad_clip=clip)
elif self.attrs['optimizer'] == 'adam':
opt = paddle.optimizer.Adam(
learning_rate=1e-1,
weight_decay=weight_decay,
grad_clip=clip)
elif self.attrs['optimizer'] == 'lamb':
opt = paddle.optimizer.Lamb(
learning_rate=1e-1,
lamb_weight_decay=weight_decay,
grad_clip=clip)
else:
raise ValueError(
f"Unsupported optimizer for this test: {self.attrs['optimizer']}"
)
opt.minimize(loss)
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = [image.name]
fetch_list = [loss.name]
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=True)
program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy).compile(feed_list,
fetch_list)
else:
program = main_prog
result = []
for epoch in range(100):
loss_res = exe.run(program, feed=self.feed, fetch_list=[loss])
result.append(loss_res)
return np.array(result)
def set_training(self):
self.is_training = True
self.epoch = 100
@IPUOpTest.static_graph
def build_model(self):
image = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
conv1 = paddle.static.nn.conv2d(
image, num_filters=3, filter_size=3, bias_attr=False)
loss = paddle.mean(conv1)
self.fetch_list = [loss.name]
weight_decay = self.attrs['weight_decay']
# Only support ClipGradByGlobalNorm
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
if self.attrs['optimizer'] == 'sgd':
opt = paddle.optimizer.SGD(learning_rate=1e-1,
weight_decay=weight_decay,
grad_clip=clip)
elif self.attrs['optimizer'] == 'adam':
opt = paddle.optimizer.Adam(
learning_rate=1e-1, weight_decay=weight_decay, grad_clip=clip)
elif self.attrs['optimizer'] == 'lamb':
opt = paddle.optimizer.Lamb(
learning_rate=1e-1,
lamb_weight_decay=weight_decay,
grad_clip=clip)
else:
raise ValueError(
f"Unsupported optimizer for this test: {self.attrs['optimizer']}")
opt.minimize(loss)
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
# CPU and IPU dimension mismatch: cpu:(100, 1, 1), ipu:(100, 1)
ipu_loss = self._test_optimizer(True).flatten()
cpu_loss = self._test_optimizer(False).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=self.atol))
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestAdam(TestBase):
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -28,73 +28,30 @@ class TestGreaterThan(IPUOpTest):
self.set_training()
self.set_test_op()
@property
def fp16_enabled(self):
return True
def set_test_op(self):
self.op = paddle.fluid.layers.greater_than
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = self.op(x, y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = self.op(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten().astype(np.int32)
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
self.check(output_dict)
def run_test_base(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self):
self.atol = 3e-6
self.rtol = 1e-6
@@ -56,86 +52,36 @@ class TestBase(IPUOpTest):
"data_layout": 'NCHW',
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
if self.is_training:
ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d(
x, num_filters=ch, filter_size=3, bias_attr=False)
scale = paddle.ParamAttr(trainable=True)
bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.group_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
else:
out = paddle.fluid.layers.nn.group_norm(
x, param_attr=True, bias_attr=True, **self.attrs)
if self.is_training:
fetch_list = [loss.name]
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
if mode > ExecutionMode.IPU_FP32 and self.is_training:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
if self.is_training:
ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d(
x, num_filters=ch, filter_size=3, bias_attr=False)
scale = paddle.ParamAttr(trainable=True)
bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.group_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
self.fetch_list = [loss.name]
else:
out = paddle.fluid.layers.nn.group_norm(
x, param_attr=True, bias_attr=True, **self.attrs)
self.fetch_list = [out.name]
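# In training mode the model prepends a conv layer and attaches Adam so the
# group_norm backward pass is exercised; inference mode only checks the
# forward output.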
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
@@ -150,7 +96,7 @@ class TestCase1(TestBase):
class TestTrainCase1(TestBase):
def set_training(self):
self.is_training = True
self.epoch = 10
self.epoch = 20
@unittest.skipIf(IPUOpTest.use_ipumodel(), "skip for ipumodel")
@@ -170,7 +116,7 @@ class TestTrainCase2(TestBase):
def set_training(self):
self.is_training = True
self.epoch = 10
self.epoch = 20
if __name__ == "__main__":
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self):
self.atol = 1e-6
self.rtol = 1e-5
@@ -52,86 +48,37 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"epsilon": 1e-05}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
if self.is_training:
ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d(
x, num_filters=ch, filter_size=3, bias_attr=False)
scale = paddle.ParamAttr(trainable=True)
bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.instance_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
else:
out = paddle.fluid.layers.nn.instance_norm(
x, param_attr=True, bias_attr=True, **self.attrs)
if self.is_training:
fetch_list = [loss.name]
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res)
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
if self.is_training:
ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d(
x, num_filters=ch, filter_size=3, bias_attr=False)
scale = paddle.ParamAttr(trainable=True)
bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.instance_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
self.fetch_list = [loss.name]
else:
out = paddle.fluid.layers.nn.instance_norm(
x, param_attr=True, bias_attr=True, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
if mode > ExecutionMode.IPU_FP32 and self.is_training:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestTrainCase1(TestBase):
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestIpuShard(unittest.TestCase):
def _test(self):
# build graph
a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
b = a + 2 # scale : scale * x + bias, ipu_index : no
with paddle.static.ipu_shard_guard(index=1):
c = b + 1 # scale, ipu_index : 1
with paddle.static.ipu_shard_guard(index=2):
d = c * 2 # scale, ipu_index : 2
with paddle.static.ipu_shard_guard(index=3):
e = d + 3 # scale, ipu_index : 3
with paddle.static.ipu_shard_guard(index=1):
e = e + 3 # scale, ipu_index : 1
with paddle.static.ipu_shard_guard(index=2):
e = e + 3 # scale, ipu_index : 2
with paddle.static.ipu_shard_guard(index=1):
f = paddle.tensor.pow(e, 2.0) # pow, ipu_index : 1
with paddle.static.ipu_shard_guard(index=2):
g = f - 1 # scale, ipu_index : 2
h = g + 1 # scale, ipu_index : no
ipu_index_list = []
main_prog = paddle.static.default_main_program()
for op in main_prog.global_block().ops:
if op.desc.has_attr("ipu_index"):
ipu_index_list.append(op.desc.attr("ipu_index"))
return ipu_index_list
def test_ipu_shard(self):
ipu_index_list = self._test()
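# Seven ops are created under ipu_shard_guard, in order: c(1), d(2), e(3),
# e(1), e(2), f(1), g(2); b and h sit outside any guard and carry no
# ipu_index, which yields the expected list below.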
expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
self.assertTrue(
np.allclose(
ipu_index_list, expected_ipu_index_list, atol=0))
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestIpuPipeline(unittest.TestCase):
def _test(self):
# build graph
a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
b = a + 2 # scale : scale * x + bias, ipu_stage : no
with paddle.static.ipu_shard_guard(stage=1):
c = b + 1 # scale, ipu_stage : 1
with paddle.static.ipu_shard_guard(stage=2):
d = c * 2 # scale, ipu_stage : 2
with paddle.static.ipu_shard_guard(stage=3):
e = d + 3 # scale, ipu_stage : 3
with paddle.static.ipu_shard_guard(stage=1):
e = e + 3 # scale, ipu_stage : 1
with paddle.static.ipu_shard_guard(stage=2):
e = e + 3 # scale, ipu_stage : 2
with paddle.static.ipu_shard_guard(stage=1):
f = paddle.tensor.pow(e, 2.0) # pow, ipu_stage : 1
with paddle.static.ipu_shard_guard(stage=2):
g = f - 1 # scale, ipu_stage : 2
h = g + 1 # scale, ipu_stage : no
ipu_index_list = []
main_prog = paddle.static.default_main_program()
for op in main_prog.global_block().ops:
if op.desc.has_attr("ipu_stage"):
ipu_index_list.append(op.desc.attr("ipu_stage"))
return ipu_index_list
def test_ipu_shard(self):
ipu_index_list = self._test()
expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
self.assertTrue(
np.allclose(
ipu_index_list, expected_ipu_index_list, atol=0))
if __name__ == "__main__":
unittest.main()
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self):
self.atol = 1e-6
self.rtol = 1e-5
@@ -59,89 +55,48 @@ class TestBase(IPUOpTest):
}
self.optimizer = None
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
if self.is_training:
ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d(
x, num_filters=ch, filter_size=3, bias_attr=False)
scale = paddle.ParamAttr(trainable=True)
bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.layer_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs)
else:
scale = self.attrs['scale']
bias = self.attrs['shift']
out = paddle.fluid.layers.nn.layer_norm(
x, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
fetch_list = [loss.name]
if self.is_training:
optimizer = None
if self.optimizer == 'sgd':
optimizer = paddle.optimizer.SGD(learning_rate=1e-2)
elif self.optimizer == 'adam':
optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
elif self.optimizer == 'lamb':
optimizer = paddle.optimizer.Lamb(
learning_rate=1e-2, lamb_weight_decay=0.0)
if optimizer is not None:
optimizer.minimize(loss)
if exec_mode:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=self.feed_fp32,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program,
feed=self.feed_fp32,
fetch_list=fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
if self.is_training:
ch = self.feed_shape[0][1]
conv1 = paddle.static.nn.conv2d(
x, num_filters=ch, filter_size=3, bias_attr=False)
scale = paddle.ParamAttr(trainable=True)
bias = paddle.ParamAttr(trainable=True)
out = paddle.fluid.layers.nn.layer_norm(
conv1, param_attr=scale, bias_attr=bias, **self.attrs)
loss = paddle.mean(out)
self.fetch_list = [loss.name]
else:
scale = self.attrs['scale']
bias = self.attrs['shift']
out = paddle.fluid.layers.nn.layer_norm(
x, param_attr=scale, bias_attr=bias, **self.attrs)
self.fetch_list = [out.name]
if self.is_training:
optimizer = None
if self.optimizer == 'sgd':
optimizer = paddle.optimizer.SGD(learning_rate=1e-2)
elif self.optimizer == 'adam':
optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
elif self.optimizer == 'lamb':
optimizer = paddle.optimizer.Lamb(
learning_rate=1e-2, lamb_weight_decay=0.0)
if optimizer is not None:
optimizer.minimize(loss)
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
@unittest.skip('raise error')
@@ -188,33 +143,17 @@ class TestTrainCase1(TestBase):
self.optimizer = 'sgd'
def set_atol(self):
super().set_atol()
self.atol = 1e-6
def set_training(self):
self.is_training = True
self.epoch = 10
class TestTrainCase2(TestBase):
def set_atol(self):
self.atol = 5e-4
def set_op_attrs(self):
self.attrs = {
"scale": True,
"shift": True,
"begin_norm_axis": 2,
"epsilon": 1e-05
}
self.optimizer = 'adam'
def set_training(self):
self.is_training = True
self.epoch = 10
self.epoch = 20
class TestTrainCase3(TestBase):
def set_atol(self):
super().set_atol()
self.atol = 5e-3
def set_op_attrs(self):
@@ -228,7 +167,7 @@ class TestTrainCase3(TestBase):
def set_training(self):
self.is_training = True
self.epoch = 10
self.epoch = 20
# not support `layer_norm(x, param_attr=False, bias_attr=False, **self.attrs)`
......
@@ -18,7 +18,7 @@ import numpy as np
import paddle
import paddle.nn.functional as F
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -31,10 +31,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
@@ -49,59 +45,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"axis": -1}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
out = F.log_softmax(x, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = F.log_softmax(x, **self.attrs)
self.fetch_list = [out.name]
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -29,68 +29,32 @@ class TestBase(IPUOpTest):
self.set_data_feed()
self.set_feed_attr()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[2, 20, 30528])
self.feed = {"in_0": data.astype('bool')}
self.feed_fp32 = {"in_0": data.astype('bool')}
self.feed_fp16 = {"in_0": data.astype('bool')}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()]
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype="bool")
out = paddle.fluid.layers.logical_not(x)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).astype(np.int32)
self.check(output_dict, check_shape=True)
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype="bool")
out = paddle.fluid.layers.logical_not(x)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
if __name__ == "__main__":
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -38,69 +38,38 @@ class TestLogicalAnd(IPUOpTest):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
out = self.op(x, y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
out = self.op(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def run_test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).astype(np.int32)
self.check(output_dict, check_shape=True)
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = ['bool', 'bool']
def set_data_feed0(self):
x = np.random.choice([True, False], size=(1, 3, 5, 5))
y = np.random.choice([True, False], size=(1, 3, 5, 5))
self.feed = {
self.feed_fp32 = {
"x": x.astype('bool'),
"y": y.astype('bool'),
}
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,19 +30,15 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.array([[[1], [3]], [[2], [4]], [[4], [127]]])
self.feed_cpu = {"x": data.astype(np.int64)}
self.feed_ipu = {"x": data.astype(np.int32)}
self.feed_fp32 = {"x": data.astype(np.int64)}
self.feed_fp16 = {"x": data.astype(np.int32)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_cpu.values()]
self.feed_list = list(self.feed_cpu.keys())
self.feed_dtype = [x.dtype for x in self.feed_cpu.values()]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self):
self.attrs = {
@@ -53,76 +49,30 @@ class TestBase(IPUOpTest):
"dtype": 'float32'
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='int64')
out = paddle.fluid.layers.embedding(x, **self.attrs)
if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
fetch_list = [loss.name]
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_cpu
if exec_mode > ExecutionMode.CPU_FP32:
feed = self.feed_ipu
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64')
out = paddle.fluid.layers.embedding(x, **self.attrs)
if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
self.fetch_list = [loss.name]
else:
self.fetch_list = [out.name]
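# The IPU path feeds int32 indices: run_model casts the int64 feed in place
# for IPU modes, replacing the separate feed_cpu/feed_ipu dictionaries the
# old version of this test maintained.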
def run_model(self, exec_mode):
if self.is_ipu_mode(exec_mode):
self.feed_fp32['x'] = self.feed_fp32['x'].astype(np.int32)
self.run_op_test(exec_mode)
def test(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and (not self.fp16_enabled or
self.is_training):
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestTrainCase1(TestBase):
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,19 +30,15 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.array([[[1], [3]], [[2], [4]], [[4], [127]]])
self.feed_cpu = {"x": x.astype(np.int64)}
self.feed_ipu = {"x": x.astype(np.int32)}
self.feed_fp32 = {"x": x.astype(np.int64)}
self.feed_fp16 = {"x": x.astype(np.int32)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_cpu.values()]
self.feed_list = list(self.feed_cpu.keys())
self.feed_dtype = [x.dtype for x in self.feed_cpu.values()]
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self):
self.attrs = {
@@ -53,76 +49,31 @@ class TestBase(IPUOpTest):
"weight_attr": None
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='int64')
embedding = paddle.nn.Embedding(**self.attrs)
out = embedding(x)
if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
fetch_list = [loss.name]
else:
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_cpu
if exec_mode > ExecutionMode.CPU_FP32:
feed = self.feed_ipu
if self.is_training:
result = []
for _ in range(self.epoch):
loss_res = exe.run(program,
feed=feed,
fetch_list=fetch_list)
result.append(loss_res[0])
return np.array(result)
else:
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='int64')
embedding = paddle.nn.Embedding(**self.attrs)
out = embedding(x)
if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
self.fetch_list = [loss.name]
else:
self.fetch_list = [out.name]
def run_model(self, exec_mode):
if self.is_ipu_mode(exec_mode):
self.feed_fp32['x'] = self.feed_fp32['x'].astype(np.int32)
self.run_op_test(exec_mode)
def test(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and (not self.fp16_enabled or
self.is_training):
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestTrainCase1(TestBase):
......
@@ -12,89 +12,75 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
import paddle
import paddle.fluid as fluid
import paddle.static
from paddle.optimizer.lr import LRScheduler
paddle.enable_static()
SEED = 2021
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
class LR_New(LRScheduler):
def __init__(self, learning_rate=1.0, last_epoch=-1, verbose=False):
def __init__(self, learning_rate=1e-5, last_epoch=-1, verbose=False):
super(LR_New, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
self.base_lr = self.base_lr + 1
self.base_lr = self.base_lr + 1e-4
self.last_epoch = self.last_epoch + 1
return self.base_lr
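# LR_New.get_lr() is called from LRScheduler.step(); it raises base_lr by a
# fixed increment on every call (1e-4 after this change) and advances
# last_epoch itself, giving a deterministic, strictly increasing schedule
# for the CPU/IPU comparison below.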
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestConvNet(unittest.TestCase):
def _test(self, run_ipu=True):
scope = fluid.core.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)
with fluid.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data(
name='image', shape=[1, 3, 10, 10], dtype='float32')
conv1 = paddle.static.nn.conv2d(
image, num_filters=3, filter_size=3, bias_attr=False)
loss = paddle.mean(conv1)
sgd = paddle.optimizer.SGD(learning_rate=LR_New())
sgd.minimize(loss)
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = [image.name]
fetch_list = [loss.name]
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=True)
program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy).compile(feed_list,
fetch_list)
else:
program = main_prog
result = []
for epoch in range(100):
if hasattr(program, "lr_sheduler"):
program.lr_sheduler.step()
loss_res = exe.run(program,
feed={image.name: np_image},
fetch_list=[loss])
result.append(loss_res)
return np.array(result)
class TestConvNet(IPUOpTest):
@IPUOpTest.static_graph
def build_model(self):
image = paddle.static.data(
name='image', shape=[1, 3, 10, 10], dtype='float32')
conv1 = paddle.static.nn.conv2d(
image, num_filters=3, filter_size=3, bias_attr=False)
loss = paddle.mean(conv1)
opt = paddle.optimizer.Lamb(learning_rate=LR_New())
opt.minimize(loss)
self.feed_list = [image.name]
self.fetch_list = [loss.name]
def run_model(self, run_ipu=True):
self.build_model()
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(self.startup_prog)
if run_ipu:
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=True)
program = paddle.static.IpuCompiledProgram(
self.main_prog, ipu_strategy=ipu_strategy).compile(
self.feed_list, self.fetch_list)
else:
program = self.main_prog
result = []
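# Step the scheduler attached to the program on every iteration; the
# framework exposes it under the (misspelled) attribute "lr_sheduler", so
# the spelling below is deliberate.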
for _ in range(100):
if hasattr(program, "lr_sheduler"):
program.lr_sheduler.step()
loss_res = exe.run(program,
feed=self.feed,
fetch_list=self.fetch_list)
result.append(loss_res)
return np.array(result)
def test_training(self):
data = np.random.rand(1, 3, 10, 10).astype(np.float32)
self.feed = {'image': data}
# CPU and IPU dimension mismatch: cpu:(100, 1, 1), ipu:(100, 1)
ipu_loss = self._test(True).flatten()
cpu_loss = self._test(False).flatten()
ipu_loss = self.run_model(True).flatten()
cpu_loss = self.run_model(False).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-4))
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-10))
if __name__ == "__main__":
......
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.random.uniform(size=[20, 30])
y = np.random.uniform(size=[30, 20])
@@ -52,63 +48,25 @@ class TestBase(IPUOpTest):
"alpha": 1.0,
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = paddle.fluid.layers.matmul(x, y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.matmul(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
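        # iterate every execution mode (typically CPU fp32, IPU fp32, and IPU
        # fp16); skip_mode drops modes this test does not support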
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
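# A minimal sketch of what run_op_test is assumed to do per mode (illustrative
# only -- the real helper lives in IPUOpTest; '_run_op_test_sketch' and 'test'
# are hypothetical names, and ExecutionMode members other than CPU_FP32 are
# assumptions):
def _run_op_test_sketch(test, exec_mode):
    # CPU_FP32 runs the Program natively; IPU modes compile it first.
    if exec_mode == IPUOpTest.ExecutionMode.CPU_FP32:
        place = paddle.CPUPlace()
    else:
        place = paddle.IPUPlace()
    exe = paddle.static.Executor(place)
    exe.run(test.startup_prog)
    program = test.main_prog
    if exec_mode != IPUOpTest.ExecutionMode.CPU_FP32:
        ipu_strategy = paddle.static.IpuStrategy()
        ipu_strategy.set_graph_config(is_training=test.is_training)
        # fp16 modes would also call set_precision_config(enable_fp16=True)
        # and feed fp16 inputs before check() compares the collected outputs
        program = paddle.static.IpuCompiledProgram(
            test.main_prog, ipu_strategy=ipu_strategy).compile(
                test.feed_list, test.fetch_list)
    return exe.run(program, feed=test.feed_fp32, fetch_list=test.fetch_list)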
class TestCase1(TestBase):
    ...
@@ -26,7 +26,7 @@ def set_serialize_factor(serialize_factor):
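        # 'serialize_factor' is read by the IPU backend to split the matmul
        # into that many sequential slices, trading speed for lower peak memory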
op._set_attr('serialize_factor', serialize_factor)
@unittest.skipIf(not paddle.is_compiled_with_ipu() or IPUOpTest.use_ipumodel(),
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
@@ -38,8 +38,8 @@ class TestBase(IPUOpTest):
def set_data_feed(self):
self.feed = {
"x": np.random.uniform(size=[2048, 3072]).astype('float32'),
"y": np.random.uniform(size=[3072, 2048]).astype('float32'),
"x": np.random.uniform(size=[16, 32]).astype('float32'),
"y": np.random.uniform(size=[32, 16]).astype('float32'),
}
def set_feed_attr(self):
@@ -50,58 +50,47 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"transpose_x": False, "transpose_y": False}
def _test_base(self, run_ipu=True):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
                # a decorator may be the best choice, but that would require modifying the API
out = paddle.matmul(x, y, **self.attrs)
set_serialize_factor(4)
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
        # a decorator may be the best choice, but that would require modifying the API
out = paddle.matmul(x, y, **self.attrs)
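        # request 4-way serialization of the matmul above (IPU-only effect)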
set_serialize_factor(4)
self.fetch_list = [out.name]
def run_model(self, run_ipu):
self.build_model()
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(self.startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
self.main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, self.fetch_list)
else:
program = self.main_prog
result = exe.run(program, feed=self.feed, fetch_list=self.fetch_list)
return result[0]
def test_base(self):
res0 = self._test_base(False)
res1 = self._test_base(True)
res0 = self.run_model(False)
res1 = self.run_model(True)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.random.uniform(size=[2, 3])
y = np.random.uniform(size=[3, 2])
@@ -48,63 +44,24 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"transpose_x": False, "transpose_y": False}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = paddle.matmul(x, y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.matmul(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
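        # run_op_test covers the per-mode boilerplate (place selection, IPU
        # compilation, fp32/fp16 feed choice) that _test_base above inlined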
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
    ...