Unverified · Commit 1fd61106 · Author: Allen Guo · Committed by: GitHub

[IPU] add more UTs 0/N (#44206)

* add authors
Co-authored-by: Allen Guo <alleng@graphcore.ai>
Co-authored-by: Zhixin Yao <zhixiny@graphcore.ai>
Co-authored-by: Zhaorui Chen <zhaoruic@graphcore.ai>

* squash py changes 0/N
Co-authored-by: Zhixin Yao <zhixiny@graphcore.ai>
Co-authored-by: Zhaorui Chen <zhaoruic@graphcore.ai>
Parent 6cd79701
...
@@ -635,7 +635,7 @@ class IpuDynamicPatcher(object):
         if not isinstance(item, CacheKey):
             raise ValueError(
                 'type(item) should be CacheKey, but received %s' %
-                type_name(item))
+                type(item).__name__)
         item_id = hash(item)
         self._recent_key = item_id
         if item_id not in self._caches or ipu_strategy.need_compile:
...
...
@@ -216,42 +216,54 @@ class IPUOpTest(IPUTest):
             raise ValueError("output_dict is empty")
         cpu_fp32 = output_dict[ExecutionMode.CPU_FP32]
         ipu_fp32 = output_dict[ExecutionMode.IPU_FP32]
-        cpu_fp32 = np.asarray(cpu_fp32).astype(np.float32).flatten()
-        ipu_fp32 = np.asarray(ipu_fp32).astype(np.float32).flatten()
-        pass_check = np.allclose(ipu_fp32,
-                                 cpu_fp32,
-                                 rtol=self.rtol,
-                                 atol=self.atol)
-        if not pass_check:
-            max_atol = np.abs(ipu_fp32 - cpu_fp32).max()
-            cpu_fp32_abs = np.abs(cpu_fp32)
-            cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
-            max_rtol = (np.abs(ipu_fp32 - cpu_fp32) / cpu_fp32_abs).max()
-            raise AssertionError(
-                f"ipu_fp32 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
-            )
-        if check_shape:
-            self.assertTrue(cpu_fp32.shape == ipu_fp32.shape)
+        if len(cpu_fp32) != len(ipu_fp32):
+            raise ValueError("different outputs number between ipu and cpu.")
+        for cpu_fp32_res, ipu_fp32_res in zip(cpu_fp32, ipu_fp32):
+            cpu_fp32_res = np.asarray(cpu_fp32_res).astype(np.float32).flatten()
+            ipu_fp32_res = np.asarray(ipu_fp32_res).astype(np.float32).flatten()
+            pass_check = np.allclose(ipu_fp32_res,
+                                     cpu_fp32_res,
+                                     rtol=self.rtol,
+                                     atol=self.atol)
+            if not pass_check:
+                max_atol = np.abs(ipu_fp32_res - cpu_fp32_res).max()
+                cpu_fp32_abs = np.abs(cpu_fp32_res)
+                cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
+                max_rtol = (np.abs(ipu_fp32_res - cpu_fp32_res) /
+                            cpu_fp32_abs).max()
+                raise AssertionError(
+                    f"ipu_fp32 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
+                )
+            if check_shape:
+                self.assertTrue(cpu_fp32_res.shape == ipu_fp32_res.shape)
         if ExecutionMode.IPU_FP16 in output_dict.keys():
             ipu_fp16 = output_dict[ExecutionMode.IPU_FP16]
-            ipu_fp16 = np.asarray(ipu_fp16).astype(np.float32).flatten()
-            pass_check = np.allclose(ipu_fp16,
-                                     cpu_fp32,
-                                     rtol=self.rtol_fp16,
-                                     atol=self.atol_fp16)
-            if not pass_check:
-                max_atol = np.abs(ipu_fp16 - cpu_fp32).max()
-                cpu_fp32_abs = np.abs(cpu_fp32)
-                cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
-                max_rtol = (np.abs(ipu_fp16 - cpu_fp32) / cpu_fp32_abs).max()
-                raise AssertionError(
-                    f"ipu_fp16 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
-                )
-            if check_shape:
-                self.assertTrue(ipu_fp16.shape == cpu_fp32.shape)
+            if len(cpu_fp32) != len(ipu_fp16):
+                raise ValueError(
+                    "different outputs number between ipu and cpu.")
+            for cpu_fp32_res, ipu_fp16_res in zip(cpu_fp32, ipu_fp16):
+                cpu_fp32_res = np.asarray(cpu_fp32_res).astype(
+                    np.float32).flatten()
+                ipu_fp16_res = np.asarray(ipu_fp16_res).astype(
+                    np.float32).flatten()
+                pass_check = np.allclose(ipu_fp16_res,
+                                         cpu_fp32_res,
+                                         rtol=self.rtol_fp16,
+                                         atol=self.atol_fp16)
+                if not pass_check:
+                    max_atol = np.abs(ipu_fp16_res - cpu_fp32_res).max()
+                    cpu_fp32_abs = np.abs(cpu_fp32_res)
+                    cpu_fp32_abs[cpu_fp32_abs == 0.0] = 1e-20
+                    max_rtol = (np.abs(ipu_fp16_res - cpu_fp32_res) /
+                                cpu_fp32_abs).max()
+                    raise AssertionError(
+                        f"ipu_fp16 check failed. max_atol is {max_atol}, max_rtol is {max_rtol}"
+                    )
+                if check_shape:
+                    self.assertTrue(ipu_fp16_res.shape == cpu_fp32_res.shape)

     # Execution Mode
     class ExecutionMode(IntEnum):
...
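
The reworked IPUOpTest.check no longer flattens all fetched outputs into one array: each CPU/IPU output pair is compared on its own, and on failure the worst absolute and relative deviations are reported. A minimal standalone sketch of that comparison loop (compare_outputs and its defaults are illustrative, not part of the patch):

import numpy as np

def compare_outputs(cpu_outs, ipu_outs, rtol=1e-6, atol=1e-10, tag="fp32"):
    # Illustrative sketch of the per-output comparison; names are hypothetical.
    if len(cpu_outs) != len(ipu_outs):
        raise ValueError("different outputs number between ipu and cpu.")
    for cpu_res, ipu_res in zip(cpu_outs, ipu_outs):
        cpu_res = np.asarray(cpu_res).astype(np.float32).flatten()
        ipu_res = np.asarray(ipu_res).astype(np.float32).flatten()
        if not np.allclose(ipu_res, cpu_res, rtol=rtol, atol=atol):
            max_atol = np.abs(ipu_res - cpu_res).max()
            denom = np.abs(cpu_res)
            denom[denom == 0.0] = 1e-20  # avoid division by zero
            max_rtol = (np.abs(ipu_res - cpu_res) / denom).max()
            raise AssertionError(
                f"ipu_{tag} check failed. max_atol is {max_atol}, "
                f"max_rtol is {max_rtol}")
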
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
@property
def fp16_enabled(self):
return False
def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 32, 32])
self.feed_fp32 = {'data': data.astype(np.float32)}
self.feed_fp16 = {'data': data.astype(np.float16)}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {}
self.attrs['data_layout'] = 'NCHW'
@IPUOpTest.static_graph
def build_model(self):
data = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
input_scale = paddle.fluid.layers.create_parameter(
shape=[self.feed_shape[0][1]], dtype="float32")
input_bias = paddle.fluid.layers.create_parameter(
shape=[self.feed_shape[0][1]], dtype="float32")
out = paddle.fluid.layers.affine_channel(data,
scale=input_scale,
bias=input_bias)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
def set_data_feed(self):
data = np.random.uniform(size=[2, 4, 64, 64])
self.feed_fp32 = {'data': data.astype(np.float32)}
self.feed_fp16 = {'data': data.astype(np.float16)}
@unittest.skip("Only support NCHW")
class TestNHWC(TestBase):
def set_op_attrs(self):
self.attrs = {}
self.attrs['data_layout'] = 'NHWC'
def set_data_feed(self):
data = np.random.uniform(size=[2, 64, 64, 3])
self.feed_fp32 = {'data': data.astype(np.float32)}
self.feed_fp16 = {'data': data.astype(np.float16)}
if __name__ == "__main__":
unittest.main()
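
For reference, affine_channel applies a per-channel scale and bias to an NCHW tensor, which is what this test exercises against the CPU result. A rough numpy sketch (affine_channel_nchw is a hypothetical helper, not Paddle's kernel):

import numpy as np

def affine_channel_nchw(x, scale, bias):
    # Broadcast the per-channel parameters over N, H, W.
    return x * scale.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)

x = np.random.uniform(size=[1, 3, 32, 32]).astype(np.float32)
scale = np.random.uniform(size=[3]).astype(np.float32)
bias = np.random.uniform(size=[3]).astype(np.float32)
assert affine_channel_nchw(x, scale, bias).shape == x.shape
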
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
import paddle.nn.functional as F
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_data_feed(self):
x = np.random.uniform(size=[3, 4, 2, 2])
target = np.random.uniform(size=[3, 4, 2, 2])
self.feed_fp32 = {
"x": x.astype(np.float32),
"target": target.astype(np.float32)
}
self.feed_fp16 = {
"x": x.astype(np.float16),
"target": target.astype(np.float16)
}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {
'reduction': 'mean',
}
@IPUOpTest.static_graph
def build_model(self, on_ipu):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype="float32")
target = paddle.static.data(name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = F.binary_cross_entropy(x, target, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model(self.is_ipu_mode(m))
self.run_model(m)
self.check()
class TestCase1(TestBase):
def set_op_attrs(self):
self.attrs = {
'reduction': 'sum',
}
class TestCase2(TestBase):
def set_op_attrs(self):
self.attrs = {
'reduction': 'none',
}
def set_atol(self):
self.atol = 1e-10
self.rtol = 1e-6
self.atol_fp16 = 5e-2
self.rtol_fp16 = 2e-2
if __name__ == "__main__":
unittest.main()
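
For reference, a rough numpy sketch of the loss under test and the three reduction modes the cases sweep (sketch only; numerical safeguards for x near 0 or 1 are omitted):

import numpy as np

def bce(x, target, reduction='mean'):
    # Element-wise binary cross entropy, then the requested reduction.
    loss = -(target * np.log(x) + (1.0 - target) * np.log(1.0 - x))
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss  # reduction == 'none'

x = np.random.uniform(0.01, 0.99, size=[3, 4, 2, 2]).astype(np.float32)
t = np.random.uniform(size=[3, 4, 2, 2]).astype(np.float32)
for r in ('mean', 'sum', 'none'):
    print(r, np.shape(bce(x, t, r)))
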
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_op_attrs()
def set_atol(self):
self.atol = 1e-6
self.rtol = 1e-6
self.atol_fp16 = 1e-3
self.rtol_fp16 = 1e-3
def set_feed(self):
data = np.random.uniform(size=[5, 5])
self.feed_fp32 = {'x': data.astype(np.float32)}
self.feed_fp16 = {'x': data.astype(np.float16)}
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {}
self.attrs['min'] = 0.1
self.attrs['max'] = 3.4
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
x = paddle.clip(x, **self.attrs)
self.fetch_list = [x.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestNoMin(TestBase):
def set_op_attrs(self):
self.attrs = {}
self.attrs['max'] = 3.4
class TestNoMax(TestBase):
def set_op_attrs(self):
self.attrs = {}
self.attrs['min'] = 0.1
class TestNoMinNoMax(TestBase):
def set_op_attrs(self):
self.attrs = {}
class TestMinMaxTensor(TestBase):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
min = paddle.fluid.layers.fill_constant(name="min",
shape=[1],
dtype='float32',
value=0.1)
max = paddle.fluid.layers.fill_constant(name="max",
shape=[1],
dtype='float32',
value=3.4)
x = paddle.clip(x, min=min, max=max)
self.fetch_list = [x.name]
class TestMinTensor(TestBase):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
min = paddle.fluid.layers.fill_constant(name="min",
shape=[1],
dtype='float32',
value=0.1)
x = paddle.clip(x, min=min)
self.fetch_list = [x.name]
class TestMaxTensor(TestBase):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
max = paddle.fluid.layers.fill_constant(name="max",
shape=[1],
dtype='float32',
value=3.4)
x = paddle.clip(x, max=max)
self.fetch_list = [x.name]
class TestCombine1(TestBase):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
min = paddle.fluid.layers.fill_constant(name="min",
shape=[1],
dtype='float32',
value=0.1)
x = paddle.clip(x, min=min, max=3.4)
self.fetch_list = [x.name]
class TestCombine2(TestBase):
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
max = paddle.fluid.layers.fill_constant(name="max",
shape=[1],
dtype='float32',
value=3.4)
x = paddle.clip(x, min=0.1, max=max)
self.fetch_list = [x.name]
class TestIntInput(TestBase):
def set_feed(self):
data = np.random.uniform(size=[5, 5])
self.feed_fp32 = {'x': data.astype(np.int32)}
self.feed_fp16 = {'x': data.astype(np.int32)}
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='int32')
x = paddle.clip(x, min=0.1, max=3.4)
self.fetch_list = [x.name]
class TestIntMinMax(TestBase):
def set_feed(self):
data = np.random.uniform(size=[5, 5])
self.feed_fp32 = {'x': data.astype(np.int32)}
self.feed_fp16 = {'x': data.astype(np.int32)}
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='int32')
min = paddle.fluid.layers.fill_constant(name="min",
shape=[1],
dtype='int32',
value=1)
max = paddle.fluid.layers.fill_constant(name="max",
shape=[1],
dtype='int32',
value=3)
x = paddle.clip(x, min=min, max=max)
self.fetch_list = [x.name]
if __name__ == "__main__":
unittest.main()
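
The cases above differ only in how min/max are supplied: Python floats, 1-element tensors built with fill_constant, or omitted entirely. Functionally they all reduce to ordinary clipping; a numpy sketch (clip_like is a hypothetical helper):

import numpy as np

def clip_like(x, min=None, max=None):
    # Accept a scalar or a 1-element array for either bound; missing
    # bounds fall back to +/- infinity.
    lo = -np.inf if min is None else np.asarray(min).item()
    hi = np.inf if max is None else np.asarray(max).item()
    return np.clip(x, lo, hi)

x = np.random.uniform(size=[5, 5]).astype(np.float32)
assert np.allclose(clip_like(x, min=np.array([0.1]), max=3.4),
                   np.clip(x, 0.1, 3.4))
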
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_op_attrs()
def set_atol(self):
self.atol = 1e-6
self.rtol = 1e-6
self.atol_fp16 = 1e-3
self.rtol_fp16 = 1e-3
def set_feed(self):
data = np.random.uniform(size=[1, 3, 8, 8])
self.feed_fp32 = {'in_0': data.astype(np.float32)}
self.feed_fp16 = {'in_0': data.astype(np.float16)}
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
def set_op_attrs(self):
self.attrs = {}
self.attrs['num_filters'] = 3
self.attrs['filter_size'] = 3
self.attrs['padding'] = 0
self.attrs['stride'] = 1
self.attrs['dilation'] = 1
self.attrs['bias_attr'] = False
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
x = paddle.static.nn.conv2d_transpose(x, **self.attrs)
self.fetch_list = [x.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['stride'] = 2
@unittest.skip("Only support dilation=1")
class TestCase2(TestBase):
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['stride'] = 2
self.attrs['dilation'] = 2
class TestCase3(TestBase):
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['padding'] = 2
class TestCase4(TestBase):
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['padding'] = "SAME"
class TestCase5(TestBase):
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['stride'] = 2
self.attrs['padding'] = "SAME"
class TestCase6(TestBase):
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['padding'] = "VALID"
class TestCase7(TestBase):
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['padding'] = "VALID"
self.attrs['stride'] = 2
class TestCase8(TestBase):
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['filter_size'] = 4
self.attrs['stride'] = 2
class TestCase9(TestBase):
    # When bias_attr is not False, an Add Op is appended after the conv2d_transpose Op.
    # When bias_attr=None, the bias value is 0.
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['bias_attr'] = None
class TestCase10(TestBase):
    # When output_size is not None, filter_size is re-computed from output_size
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['filter_size'] = None
self.attrs['output_size'] = [12, 12]
class TestCase11(TestBase):
# Depthwise conv2d transpose
def set_op_attrs(self):
super().set_op_attrs()
self.attrs['groups'] = 3
if __name__ == "__main__":
unittest.main()
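
TestCase10's comment says filter_size is derived from output_size. Assuming the standard transposed-convolution relation out = (in - 1) * stride - 2 * pad + k (dilation 1, no output padding), the arithmetic for the [1, 3, 8, 8] feed above works out as follows (infer_kernel_size is an illustrative helper):

def infer_kernel_size(in_size, out_size, stride=1, pad=0):
    # Solve out = (in - 1) * stride - 2 * pad + k for k.
    return out_size - (in_size - 1) * stride + 2 * pad

print(infer_kernel_size(8, 12))  # -> 5: a 5x5 kernel maps 8x8 to 12x12
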
...
@@ -108,7 +108,7 @@ class TestCase4(TestBase):
 class TestCase5(TestBase):
+    # Depthwise conv2d
     def set_op_attrs(self):
         super().set_op_attrs()
         self.attrs['groups'] = 3
...
...
@@ -116,5 +116,35 @@ class TestCase3(TestBase):
     }

+class TestCase4(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 5, 7])
+        label = np.random.randint(0, 7, [3, 5, 1], dtype='int64')
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "label": label.astype(np.int64)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "label": label.astype(np.int32)
+        }
+
+
+class TestCase5(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[3, 5, 6, 7])
+        label = np.random.randint(0, 7, [3, 5, 6], dtype='int64')
+        self.feed_fp32 = {
+            "x": x.astype(np.float32),
+            "label": label.astype(np.int64)
+        }
+        self.feed_fp16 = {
+            "x": x.astype(np.float16),
+            "label": label.astype(np.int32)
+        }
+
+
 if __name__ == "__main__":
     unittest.main()
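
The two new cases feed rank-3 and rank-4 inputs with hard integer labels. A rough numpy sketch of that hard-label cross entropy, where the label indexes the last axis of x (sketch only, not Paddle's kernel):

import numpy as np

def hard_label_cross_entropy(x, label):
    # label indexes the last axis of x; output keeps the label's shape.
    return -np.log(np.take_along_axis(x, label, axis=-1))

x = np.random.uniform(0.1, 1.0, size=[3, 5, 7]).astype(np.float32)
label = np.random.randint(0, 7, [3, 5, 1])
assert hard_label_cross_entropy(x, label).shape == (3, 5, 1)
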
...
@@ -86,5 +86,35 @@ class TestCase3(TestBase):
         self.attrs = {"exclusive": True, "reverse": True}

+class TestCase4(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[1, 128])
+        self.feed_fp32 = {"x": x.astype(np.int32)}
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype="int32")
+        out = paddle.fluid.layers.cumsum(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+
+class TestCase5(TestBase):
+
+    def set_data_feed(self):
+        x = np.random.uniform(size=[1, 128])
+        self.feed_fp32 = {"x": x.astype(np.int64)}
+
+    @IPUOpTest.static_graph
+    def build_model(self):
+        x = paddle.static.data(name=self.feed_list[0],
+                               shape=self.feed_shape[0],
+                               dtype="int64")
+        out = paddle.fluid.layers.cumsum(x, **self.attrs)
+        self.fetch_list = [out.name]
+
+
 if __name__ == "__main__":
     unittest.main()
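
TestCase3 in this file combines the exclusive and reverse flags. A numpy sketch of those two flags under their usual definitions (assumed here to match Paddle's semantics; cumsum_like is an illustrative helper):

import numpy as np

def cumsum_like(x, axis=-1, exclusive=False, reverse=False):
    if reverse:
        x = np.flip(x, axis)
    out = np.cumsum(x, axis)
    if exclusive:
        out = np.roll(out, 1, axis)
        np.moveaxis(out, axis, 0)[0] = 0  # shift right and seed with zero
    if reverse:
        out = np.flip(out, axis)
    return out

print(cumsum_like(np.array([1., 2., 3.]), exclusive=True))                # [0. 1. 3.]
print(cumsum_like(np.array([1., 2., 3.]), exclusive=True, reverse=True))  # [5. 3. 0.]
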
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_feed()
self.set_op_attrs()
def set_op_attrs(self):
self.attrs = {}
def set_feed(self):
data = np.random.uniform(size=[32, 100])
self.feed_fp32 = {'x': data.astype(np.float32)}
self.feed_fp16 = {'x': data.astype(np.float16)}
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
x = paddle.static.nn.data_norm(input=x, **self.attrs)
self.fetch_list = [x.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
def set_op_attrs(self):
self.attrs = {"in_place": True}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
x = paddle.static.nn.data_norm(input=x, **self.attrs)
x = x + 1
self.fetch_list = [x.name]
@unittest.skip("Do not support in_place=True when test single data_norm Op")
class TestCase2(TestBase):
def set_op_attrs(self):
self.attrs = {"in_place": True}
class TestCase3(TestBase):
def set_op_attrs(self):
self.attrs = {"data_layout": "NHWC"}
class TestCase4(TestBase):
def set_op_attrs(self):
self.attrs = {"epsilon": 0.001}
class TestCase5(TestBase):
def set_op_attrs(self):
self.attrs = {"do_model_average_for_mean_and_var": True}
class TestCase6(TestBase):
    # If enable_scale_and_shift=True, the scale and bias values must be set via `param_attr`
def set_op_attrs(self):
self.attrs = {
"param_attr": {
"scale_w": 0.5,
"bias": 0.1
},
"enable_scale_and_shift": True
}
class TestCase7(TestBase):
def set_op_attrs(self):
self.attrs = {
"param_attr": {
"batch_size": 1e3,
"batch_sum": 0.1,
"batch_square": 1e3,
"scale_w": 0.5,
"bias": 0.1
},
"enable_scale_and_shift": True
}
if __name__ == "__main__":
unittest.main()
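
TestCase6 and TestCase7 seed data_norm's statistics through param_attr. A heavily hedged numpy sketch of the normalization those statistics imply, assuming the documented relations mean = batch_sum / batch_size and scale = sqrt(batch_size / batch_square); this is an illustration, not Paddle's kernel:

import numpy as np

def data_norm_like(x, batch_size=1e4, batch_sum=0.0, batch_square=1e4,
                   scale_w=1.0, bias=0.0):
    # Assumed statistics-to-normalization mapping, plus the optional
    # learned affine from enable_scale_and_shift.
    mean = batch_sum / batch_size
    scale = np.sqrt(batch_size / batch_square)
    return (x - mean) * scale * scale_w + bias

x = np.random.uniform(size=[32, 100]).astype(np.float32)
assert np.allclose(data_norm_like(x), x)  # the defaults normalize to identity
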
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_data_feed(self):
data_x = np.random.uniform(size=[8, 1, 6, 1])
data_y = np.random.uniform(size=[7, 1, 5])
self.feed_fp32 = {
"x": data_x.astype(np.float32),
"y": data_y.astype(np.float32)
}
self.feed_fp16 = {
"x": data_x.astype(np.float16),
"y": data_y.astype(np.float16)
}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = [x.dtype for x in self.feed_fp32.values()]
def set_op_attrs(self):
self.attrs = {"p": 2}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
y = paddle.static.data(name=self.feed_list[1],
shape=self.feed_shape[1],
dtype='float32')
out = paddle.dist(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def test(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
class TestCase1(TestBase):
def set_op_attrs(self):
self.attrs = {"p": 0}
class TestCase2(TestBase):
def set_op_attrs(self):
self.attrs = {"p": float("inf")}
class TestCase3(TestBase):
def set_op_attrs(self):
self.attrs = {"p": float("-inf")}
if __name__ == "__main__":
unittest.main()
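
For reference, paddle.dist computes the p-norm of the broadcast difference x - y. A rough numpy sketch covering the special cases the test sweeps (dist_like is a hypothetical helper):

import numpy as np

def dist_like(x, y, p=2.0):
    z = np.abs(x - y)  # numpy broadcasting mirrors the op's broadcast rules
    if p == 0:
        return float(np.count_nonzero(z))  # p=0 counts non-zero elements
    if p == float('inf'):
        return z.max()
    if p == float('-inf'):
        return z.min()
    return (z ** p).sum() ** (1.0 / p)

x = np.random.uniform(size=[8, 1, 6, 1]).astype(np.float32)
y = np.random.uniform(size=[7, 1, 5]).astype(np.float32)
print(dist_like(x, y))  # the difference broadcasts to shape (8, 7, 6, 5)
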