Unverified commit 001dab0b, authored by Allen Guo, committed by GitHub

update UTs 2 (#42518)

Parent 063a3509
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
......@@ -46,59 +42,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.mean(x)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
if __name__ == "__main__":
......
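All of the tests in this commit follow the same refactor: the hand-rolled `_test_base` scaffolding (scope/program guards, place selection, `IpuCompiledProgram`, fp32/fp16 feed switching) moves into the `IPUOpTest` base class, and each test keeps only a `build_model`/`run_model`/`test` trio. A minimal sketch of the resulting skeleton is below; the helper semantics (`static_graph`, `run_op_test`, `skip_mode`, `check`) are inferred from their usage in this diff, since `op_test_ipu.py` itself is not shown here.

```python
# Sketch of the post-refactor UT skeleton; helper behavior is assumed from
# how this diff uses it, not from the op_test_ipu.py source.
import unittest

import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestSkeleton(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        data = np.random.uniform(size=[1, 3, 10, 10])
        self.feed_fp32 = {"in_0": data.astype(np.float32)}
        self.feed_fp16 = {"in_0": data.astype(np.float16)}
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    # static_graph replaces the old scope/program/random_seed boilerplate:
    # it builds the graph under fresh guards and stores main_prog and
    # startup_prog on self.
    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
        out = paddle.fluid.layers.mean(x)
        self.fetch_list = [out.name]

    def run_model(self, exec_mode):
        # run_op_test picks the place, compiles for IPU modes, selects the
        # fp32 or fp16 feed, and records the output keyed by exec_mode.
        self.run_op_test(exec_mode)

    def test(self):
        for m in IPUOpTest.ExecutionMode:
            if not self.skip_mode(m):
                self.build_model()
                self.run_model(m)
        self.check()  # compares every recorded mode against CPU_FP32


if __name__ == "__main__":
    unittest.main()
```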
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
import paddle.static
import paddle.nn.functional as F
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -28,10 +28,7 @@ class TestBase(IPUOpTest):
self.set_atol()
self.set_data_feed()
self.set_feed_attr()
@property
def fp16_enabled(self):
return True
self.set_attrs()
def set_atol(self):
self.atol = 1e-6
......@@ -42,7 +39,6 @@ class TestBase(IPUOpTest):
def set_data_feed(self):
data = np.random.uniform(size=[1, 10, 27, 27])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
......@@ -54,86 +50,126 @@ class TestBase(IPUOpTest):
for var_name in to_fp16_var_names:
assert block.var(var_name).dtype == paddle.float16
def _test_base(self, exec_mode):
generator = paddle.fluid.unique_name.UniqueNameGenerator()
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.fluid.unique_name.guard(generator):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
def set_attrs(self):
self.num_ipus = 1
self.enable_pipelining = False
self.enable_manual_shard = False
self.batches_per_step = 1
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
# using fp32
x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)
# using fp16
with paddle.static.amp.fp16_guard():
x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)
# using fp32
x = paddle.static.nn.fc(x, size=10)
loss = paddle.mean(x)
self.fetch_list = [loss.name]
def run_model(self, exec_mode):
# cast model to fp16
if self.is_fp16_mode(exec_mode):
amp_list = paddle.static.amp.CustomOpLists()
amp_list.unsupported_list = {}
to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
self.main_prog, amp_list, use_fp16_guard=True)
self.dtype_check(self.main_prog, to_fp16_var_names)
if self.is_ipu_mode(exec_mode):
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(self.startup_prog)
# cast parameters to fp16
if exec_mode == IPUOpTest.ExecutionMode.IPU_FP16:
paddle.static.amp.cast_parameters_to_fp16(
paddle.CPUPlace(),
self.main_prog,
to_fp16_var_names=to_fp16_var_names)
if self.is_ipu_mode(exec_mode):
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(
is_training=False,
num_ipus=self.num_ipus,
enable_manual_shard=self.enable_manual_shard)
ipu_strategy.set_pipelining_config(
enable_pipelining=self.enable_pipelining,
batches_per_step=self.batches_per_step)
program = paddle.static.IpuCompiledProgram(
self.main_prog, ipu_strategy=ipu_strategy).compile(
self.feed_list, self.fetch_list)
else:
program = self.main_prog
result = exe.run(program,
feed=self.feed_fp32,
fetch_list=self.fetch_list)
self.output_dict[exec_mode] = result[0]
def test(self):
for m in IPUOpTest.ExecutionMode:
self.build_model()
self.run_model(m)
self.check()
class TestPipeline(TestBase):
@IPUOpTest.static_graph
def build_model(self, exec_mode):
feed_shape = list(self.feed_shape[0])
if self.is_ipu_mode(exec_mode):
feed_shape[0] = 1
x = paddle.static.data(
name=self.feed_list[0], shape=feed_shape, dtype='float32')
with paddle.static.ipu_shard_guard(index=0, stage=0):
# using fp32
x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)
with paddle.static.ipu_shard_guard(index=1, stage=1):
# using fp16
with paddle.static.amp.fp16_guard():
x = paddle.static.nn.conv2d(
input=x, num_filters=6, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)
with paddle.static.ipu_shard_guard(index=2, stage=2):
# using fp32
x = paddle.static.nn.fc(x, size=10)
loss = paddle.mean(x)
self.fetch_list = [loss.name]
def set_data_feed(self):
data = np.random.uniform(size=[3, 10, 27, 27])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
def set_attrs(self):
self.num_ipus = 3
self.enable_pipelining = True
self.enable_manual_shard = True
self.batches_per_step = 3
def test(self):
for m in IPUOpTest.ExecutionMode:
self.build_model(m)
self.run_model(m)
# skip check results
if __name__ == "__main__":
......
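The `TestPipeline` case above couples four knobs that are easy to misread. A short sketch of how they fit together, with the constraints stated as a reading of the test rather than of the IPU documentation:

```python
# How the pipelining attributes in TestPipeline relate (values taken from the
# test above; the constraint comments are inferences from the diff, not docs).
import paddle
import paddle.static

ipu_strategy = paddle.static.IpuStrategy()
# ipu_shard_guard(index=i, stage=i) placed one block per device, so num_ipus
# must cover every shard index used in build_model (0, 1, 2 -> 3 devices).
ipu_strategy.set_graph_config(
    is_training=False, num_ipus=3, enable_manual_shard=True)
# With pipelining enabled, the device consumes batches_per_step micro-batches
# per session run: the host feed has batch 3 ([3, 10, 27, 27]) while the
# graph declares a single micro-batch ([1, 10, 27, 27]).
ipu_strategy.set_pipelining_config(
    enable_pipelining=True, batches_per_step=3)
```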
......@@ -18,7 +18,7 @@ import numpy as np
import paddle
import paddle.static
import paddle.nn.functional as F
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -29,10 +29,7 @@ class TestBase(IPUOpTest):
self.set_training()
self.set_data_feed()
self.set_feed_attr()
@property
def fp16_enabled(self):
return True
self.set_attrs()
def set_atol(self):
self.atol = 2e-6
......@@ -47,44 +44,36 @@ class TestBase(IPUOpTest):
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 28, 28])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
self.feed_fp16 = {"in_0": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_attrs(self):
self.num_ipus = 1
self.enable_pipelining = False
self.enable_manual_shard = False
self.batches_per_step = 1
def dtype_check(self, program, to_fp16_var_names):
block = program.global_block()
assert len(to_fp16_var_names) > 0
for var_name in to_fp16_var_names:
assert block.var(var_name).dtype == paddle.float16
def _test_base(self, exec_mode):
generator = paddle.fluid.unique_name.UniqueNameGenerator()
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.fluid.unique_name.guard(generator):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
# using fp32
x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)
# using fp16
with paddle.static.amp.fp16_guard():
x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)
......@@ -94,57 +83,110 @@ class TestBase(IPUOpTest):
# optimizer
optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
optimizer.minimize(loss, self.startup_prog)
self.fetch_list = [loss.name]
def run_model(self, exec_mode):
# cast model to fp16
if self.is_fp16_mode(exec_mode):
amp_list = paddle.static.amp.CustomOpLists()
amp_list.unsupported_list = {}
to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
self.main_prog, amp_list)
self.dtype_check(self.main_prog, to_fp16_var_names)
if self.is_ipu_mode(exec_mode):
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(self.startup_prog)
# cast parameters to fp16
if self.is_fp16_mode(exec_mode):
paddle.static.amp.cast_parameters_to_fp16(
paddle.CPUPlace(),
self.main_prog,
to_fp16_var_names=to_fp16_var_names)
if self.is_ipu_mode(exec_mode):
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(
is_training=self.is_training,
num_ipus=self.num_ipus,
enable_manual_shard=self.enable_manual_shard)
ipu_strategy.set_pipelining_config(
enable_pipelining=self.enable_pipelining,
batches_per_step=self.batches_per_step)
program = paddle.static.IpuCompiledProgram(
self.main_prog, ipu_strategy=ipu_strategy).compile(
self.feed_list, self.fetch_list)
else:
program = self.main_prog
result = []
for _ in range(self.epoch):
out = exe.run(program,
feed=self.feed_fp32,
fetch_list=self.fetch_list)
result.append(out)
self.output_dict[exec_mode] = result
def test(self):
for m in IPUOpTest.ExecutionMode:
self.build_model()
self.run_model(m)
self.check()
class TestPipeline(TestBase):
@IPUOpTest.static_graph
def build_model(self, exec_mode):
feed_shape = list(self.feed_shape[0])
if self.is_ipu_mode(exec_mode):
feed_shape[0] = 1
x = paddle.static.data(
name=self.feed_list[0], shape=feed_shape, dtype='float32')
with paddle.static.ipu_shard_guard(index=0, stage=0):
# using fp32
x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)
with paddle.static.ipu_shard_guard(index=1, stage=1):
# using fp16
with paddle.static.amp.fp16_guard():
x = paddle.static.nn.conv2d(
input=x, num_filters=6, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)
with paddle.static.ipu_shard_guard(index=2, stage=2):
# using fp32
x = paddle.static.nn.fc(x, size=10)
loss = paddle.mean(x)
# optimizer
optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
optimizer.minimize(loss, self.startup_prog)
self.fetch_list = [loss.name]
def set_data_feed(self):
data = np.random.uniform(size=[5, 10, 27, 27])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
def set_attrs(self):
self.num_ipus = 3
self.enable_pipelining = True
self.enable_manual_shard = True
self.batches_per_step = 5
def test(self):
for m in IPUOpTest.ExecutionMode:
self.build_model(m)
self.run_model(m)
# skip check results
if __name__ == "__main__":
......
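The two-step AMP casting used in both mixed-precision tests is worth isolating: `cast_model_to_fp16` rewrites the graph, while `cast_parameters_to_fp16` rewrites the already-initialized weight tensors, which is why it must run after the startup program. A self-contained sketch under those assumptions (argument names taken from the calls above):

```python
import paddle
import paddle.static

paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[1, 3, 28, 28], dtype='float32')
    with paddle.static.amp.fp16_guard():
        x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
    loss = paddle.mean(x)

# Step 1: graph pass. Returns the names of variables whose dtype was
# flipped to fp16 (with use_fp16_guard=True, only ops under fp16_guard).
amp_list = paddle.static.amp.CustomOpLists()
amp_list.unsupported_list = {}
to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
    main_prog, amp_list, use_fp16_guard=True)

# Step 2: tensor pass. Parameters were materialized as fp32 by the startup
# program, so cast the stored values to match the rewritten graph.
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
paddle.static.amp.cast_parameters_to_fp16(
    paddle.CPUPlace(), main_prog, to_fp16_var_names=to_fp16_var_names)
```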
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
x = np.random.uniform(size=[2, 5])
y = np.random.uniform(size=[5, 3])
......@@ -51,63 +47,24 @@ class TestBase(IPUOpTest):
"y_num_col_dims": 1,
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.mul(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_data_feed(self):
x = np.ones([1, 10])
y = np.zeros([1, 10])
self.feed_fp32 = {
"x": x.astype(np.float32),
"y": y.astype(np.float32),
}
self.feed_fp16 = {
"x": x.astype(np.float16),
"y": y.astype(np.float16),
}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.not_equal(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
def set_data_feed(self):
x = np.ones([1, 10])
y = np.ones([1, 10])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
class TestCase2(TestBase):
def set_data_feed(self):
x = np.ones([1, 10])
y = np.arange(0, 10).reshape([1, 10])
self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestScalar(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_data_feed(self):
x = np.ones([1, 10])
y = 0.5
self.feed_fp32 = {"x": x.astype(np.float32), }
self.feed_fp16 = {"x": x.astype(np.float16), }
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = (x != 0.5)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
if __name__ == "__main__":
unittest.main()
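Every test above now funnels through `run_op_test`, which this diff never shows because it lives in `op_test_ipu.py`. Below is a hypothetical reconstruction inferred from the `_test_base` bodies it replaces; it is useful for reading the rest of the diff, but it is not the actual implementation.

```python
# Hypothetical reconstruction of IPUOpTest.run_op_test, inferred from the
# _test_base bodies deleted throughout this commit -- NOT the real source.
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest


def run_op_test(self, exec_mode, ipu_strategy=None):
    if exec_mode == IPUOpTest.ExecutionMode.CPU_FP32:
        place = paddle.CPUPlace()
    else:
        place = paddle.IPUPlace()
    exe = paddle.static.Executor(place)
    exe.run(self.startup_prog)

    if self.is_ipu_mode(exec_mode):
        if ipu_strategy is None:
            ipu_strategy = paddle.static.IpuStrategy()
            ipu_strategy.set_graph_config(is_training=self.is_training)
        if self.is_fp16_mode(exec_mode):
            ipu_strategy.set_precision_config(enable_fp16=True)
        program = paddle.static.IpuCompiledProgram(
            self.main_prog, ipu_strategy=ipu_strategy).compile(
                self.feed_list, self.fetch_list)
    else:
        program = self.main_prog

    feed = self.feed_fp16 if self.is_fp16_mode(exec_mode) else self.feed_fp32
    result = exe.run(program, feed=feed, fetch_list=self.fetch_list)
    # check() later compares the per-mode outputs stored here.
    self.output_dict[exec_mode] = result[0]
```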
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,74 +30,34 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data1 = np.array([[1], [1], [3], [0]])
self.feed_fp32 = {'x': data1.astype(np.int32)}
self.feed_fp16 = {'x': data1.astype(np.int32)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {"depth": 4, "allow_out_of_range": False}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32')
out = paddle.fluid.layers.one_hot(x, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled):
break
output_dict[mode] = self._test_base(mode).flatten()
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
@unittest.skip('does not support allow_out_of_range=True')
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,74 +30,34 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data1 = np.array([[1], [1], [3], [0]])
self.feed_fp32 = {'x': data1.astype(np.int32)}
self.feed_fp16 = {'x': data1.astype(np.int32)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {"depth": 4, "allow_out_of_range": False}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32')
out = paddle.fluid.input.one_hot(x, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if (mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled):
break
output_dict[mode] = self._test_base(mode).flatten()
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
@unittest.skip('does not support allow_out_of_range=True')
......
......@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import paddle
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
......@@ -56,59 +52,22 @@ class TestBase(IPUOpTest):
"data_format": 'NCHW',
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.pool2d(x, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......@@ -180,5 +139,21 @@ class TestCase6(TestBase):
self.attrs['exclusive'] = False
class TestAdaptive(TestBase):
def set_op_attrs(self):
self.attrs = {
"pool_size": 1,
"pool_type": 'avg',
"require_index": False
}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs)
self.fetch_list = [out.name]
if __name__ == "__main__":
unittest.main()
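For the new `TestAdaptive` case: `adaptive_pool2d` with `pool_size=1` produces one output bin per channel, i.e. global average pooling. A quick numpy cross-check of that equivalence (a standalone illustration, not part of the test suite):

```python
import numpy as np
import paddle
import paddle.static

paddle.enable_static()
main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.static.data(name='x', shape=[1, 3, 10, 10], dtype='float32')
    # pool_size=1 -> a single output bin per channel: global average pooling.
    out = paddle.fluid.layers.adaptive_pool2d(x, pool_size=1, pool_type='avg')

exe = paddle.static.Executor(paddle.CPUPlace())
data = np.random.uniform(size=[1, 3, 10, 10]).astype('float32')
res, = exe.run(main_prog, feed={'x': data}, fetch_list=[out.name])
np.testing.assert_allclose(
    res.reshape(1, 3), data.mean(axis=(2, 3)), rtol=1e-5)
```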
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
......@@ -56,59 +52,22 @@ class TestBase(IPUOpTest):
"data_format": 'NCHW',
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.pool2d(x, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......@@ -179,5 +138,21 @@ class TestCase6(TestBase):
self.attrs['exclusive'] = False
class TestAdaptive(TestBase):
def set_op_attrs(self):
self.attrs = {
"pool_size": 1,
"pool_type": 'max',
"require_index": False
}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.adaptive_pool2d(x, **self.attrs)
self.fetch_list = [out.name]
if __name__ == "__main__":
unittest.main()
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 2, 2])
self.feed_fp32 = {"x": data.astype(np.float32)}
......@@ -47,59 +43,22 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {"factor": 2.0}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.pow(x, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......@@ -119,54 +78,14 @@ class TestCase1(TestBase):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
factor = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.pow(x, factor=factor, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
self.fetch_list = [out.name]
if __name__ == "__main__":
......
......@@ -30,82 +30,48 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return False
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 3, 3]).astype('float32')
self.feed_fp32 = {"x": data.astype(np.float32)}
self.feed_fp16 = {"x": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, run_ipu=True):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
out = paddle.fluid.layers.conv2d(x, num_filters=3, filter_size=3)
out = paddle.fluid.layers.Print(out, **self.attrs)
if self.is_training:
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(learning_rate=1e-2)
adam.minimize(loss)
self.fetch_list = [loss.name]
else:
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
class TestCase1(TestBase):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -28,10 +28,6 @@ class TestMean(IPUOpTest):
self.set_training()
self.set_test_op()
@property
def fp16_enabled(self):
return True
def set_test_op(self):
self.op = paddle.fluid.layers.reduce_mean
......@@ -40,59 +36,22 @@ class TestMean(IPUOpTest):
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = self.op(x, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def run_test_base(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
def set_data_feed0(self):
data = np.random.uniform(size=[2, 4])
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"x": data.astype(np.float32)}
......@@ -50,60 +46,23 @@ class TestBase(IPUOpTest):
"inplace": True,
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
add = paddle.fluid.layers.elementwise_add(x, x)
out = paddle.fluid.layers.reshape(add, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[2, 4, 6])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
......@@ -48,59 +44,22 @@ class TestBase(IPUOpTest):
self.attrs['shape'] = [6, 8]
self.attrs['inplace'] = False
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.reshape(x=x, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.check(output_dict, check_shape=True)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -51,59 +51,22 @@ class TestBase(IPUOpTest):
"bias_after_scale": True,
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.scale(x, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode).flatten()
self.check(output_dict)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......@@ -155,54 +118,14 @@ class TestCase5(TestBase):
"bias_after_scale": True,
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
y = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='float32')
out = paddle.fluid.layers.scale(x, scale=y, **self.attrs)
fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
self.fetch_list = [out.name]
if __name__ == "__main__":
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_attrs()
def set_training(self):
self.is_training = True
self.epoch = 100
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 10, 10]).astype('float32')
self.feed_fp32 = {"image": data.astype(np.float32)}
self.feed_fp16 = {"image": data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_attrs(self):
self.attrs = {
"optimizer": 'lamb',
"weight_decay": 0.0,
"scaled_optimizer_state": True
}
@IPUOpTest.static_graph
def build_model(self):
image = paddle.static.data(
name='image', shape=[1, 3, 10, 10], dtype='float32')
conv1 = paddle.static.nn.conv2d(
image, num_filters=3, filter_size=3, bias_attr=False)
loss = paddle.mean(conv1)
weight_decay = self.attrs['weight_decay']
opt = paddle.optimizer.Adam(
learning_rate=1e-1, weight_decay=weight_decay)
if self.attrs['optimizer'] == 'lamb':
opt = paddle.optimizer.Lamb(
learning_rate=1e-1, lamb_weight_decay=weight_decay)
opt.minimize(loss)
self.feed_list = [image.name]
self.fetch_list = [loss.name]
def run_model(self, exec_mode):
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if self.is_ipu_mode(exec_mode):
if "use_no_bias_optimizer" in self.attrs.keys():
ipu_strategy.set_options({
"use_no_bias_optimizer": self.attrs["use_no_bias_optimizer"]
})
if "scaled_optimizer_state" in self.attrs.keys():
ipu_strategy.set_options({
"scaled_optimizer_state":
self.attrs["scaled_optimizer_state"]
})
self.run_op_test(exec_mode, ipu_strategy=ipu_strategy)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestScaledAdam(TestBase):
def set_attrs(self):
self.attrs = {
"optimizer": 'adam',
"weight_decay": 0.0,
"scaled_optimizer_state": True
}
def set_atol(self):
super().set_atol()
self.atol = 1e-5
self.rtol = 1e-5
@unittest.skip('cpu do not support AdamNoBias')
class TestScaledAdamNoBias(TestBase):
def set_attrs(self):
self.attrs = {
"optimizer": 'adam',
"weight_decay": 0.0,
"use_no_bias_optimizer": True,
"scaled_optimizer_state": True
}
@unittest.skip('cpu do not support LambNoBias')
class TestScaledLambNoBias(TestBase):
def set_attrs(self):
self.attrs = {
"optimizer": 'lamb',
"weight_decay": 0.0,
"use_no_bias_optimizer": True,
"scaled_optimizer_state": True
}
if __name__ == "__main__":
unittest.main()
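The pattern introduced here, building an `IpuStrategy` in `run_model` and handing it to `run_op_test`, is how a test opts into backend-only switches without touching the CPU reference run. A condensed sketch (the option semantics in the comments are a reading of the test names above, not of the PopART documentation):

```python
import paddle
import paddle.static


# Condensed rewrite of TestBase.run_model above; a sketch, not the test itself.
def run_model(self, exec_mode):
    ipu_strategy = paddle.static.IpuStrategy()
    ipu_strategy.set_graph_config(is_training=self.is_training)
    if self.is_ipu_mode(exec_mode):
        # Backend-only knob, guarded so the CPU_FP32 baseline (which does not
        # go through IpuCompiledProgram) runs with the stock optimizer.
        ipu_strategy.set_options({"scaled_optimizer_state": True})
    self.run_op_test(exec_mode, ipu_strategy=ipu_strategy)
```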
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_atol(self):
self.atol = 3e-6
self.rtol = 1e-5
......@@ -52,20 +48,10 @@ class TestBase(IPUOpTest):
def set_op_attrs(self):
self.attrs = {}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
conv1 = paddle.static.nn.conv2d(
x, num_filters=3, filter_size=3, bias_attr=False)
conv2 = paddle.static.nn.conv2d(
......@@ -74,45 +60,20 @@ class TestBase(IPUOpTest):
conv2, num_filters=3, filter_size=3, bias_attr=False)
conv4 = paddle.static.nn.conv2d(
conv3, num_filters=3, filter_size=3, bias_attr=False)
self.fetch_list = [conv4.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
def run_model(self, exec_mode):
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(
is_training=self.is_training, micro_batch_size=2)
self.run_op_test(exec_mode, ipu_strategy)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
if __name__ == "__main__":
......
......@@ -17,7 +17,7 @@ import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
......@@ -30,10 +30,6 @@ class TestBase(IPUOpTest):
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return True
def set_data_feed(self):
data = np.random.uniform(size=[4, 5, 6])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
......@@ -51,59 +47,22 @@ class TestBase(IPUOpTest):
"ends": [3, 2, 4],
}
def _test_base(self, exec_mode):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
out = paddle.fluid.layers.slice(x, **self.attrs)
self.fetch_list = [out.name]
if exec_mode == ExecutionMode.CPU_FP32:
place = paddle.CPUPlace()
else:
place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if exec_mode != ExecutionMode.CPU_FP32:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
if exec_mode == ExecutionMode.IPU_POPART_FP16:
ipu_strategy.set_precision_config(enable_fp16=True)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
feed = self.feed_fp32
if exec_mode > ExecutionMode.IPU_FP32:
feed = self.feed_fp16
result = exe.run(program, feed=feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
output_dict = {}
for mode in ExecutionMode:
if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
break
output_dict[mode] = self._test_base(mode)
self.check(output_dict, check_shape=True)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
......@@ -135,54 +94,17 @@ class TestCase2(TestBase):
def set_op_attrs(self):
self.attrs = {"axes": [0, 1, 2]}
def _test_base(self, run_ipu=True):
scope = fluid.core.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
with fluid.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
starts = paddle.static.data(
name=self.feed_list[1], shape=self.feed_shape[1], dtype='int32')
ends = paddle.static.data(
name=self.feed_list[2], shape=self.feed_shape[2], dtype='int32')
out = paddle.fluid.layers.slice(
x, starts=starts, ends=ends, **self.attrs)
fetch_list = [out.name]
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
else:
program = main_prog
result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
return result[0]
def test_base(self):
pass
self.fetch_list = [out.name]
if __name__ == "__main__":
......