未验证 提交 75c975f0 编写于 作者: L Leo Guo 提交者: GitHub

Modify the unittests of the assign_value, iou_similarity, one_hot_v2,...

Modify the unittests of the assign_value, iou_similarity, one_hot_v2, reduce_mean, roi_align op. test=kunlun (#44061)
上级 ca9339eb
...@@ -10,7 +10,7 @@ set(XPU_RT_LIB_NAME "libxpurt.so") ...@@ -10,7 +10,7 @@ set(XPU_RT_LIB_NAME "libxpurt.so")
if(NOT DEFINED XPU_BASE_URL) if(NOT DEFINED XPU_BASE_URL)
set(XPU_BASE_URL_WITHOUT_DATE set(XPU_BASE_URL_WITHOUT_DATE
"https://baidu-kunlun-product.cdn.bcebos.com/KL-SDK/klsdk-dev") "https://baidu-kunlun-product.cdn.bcebos.com/KL-SDK/klsdk-dev")
set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220601") set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220703")
else() else()
set(XPU_BASE_URL "${XPU_BASE_URL}") set(XPU_BASE_URL "${XPU_BASE_URL}")
endif() endif()
...@@ -19,14 +19,14 @@ endif() ...@@ -19,14 +19,14 @@ endif()
if(NOT DEFINED XPU_XDNN_BASE_URL) if(NOT DEFINED XPU_XDNN_BASE_URL)
set(XPU_XDNN_BASE_URL_WITHOUT_DATE set(XPU_XDNN_BASE_URL_WITHOUT_DATE
"https://klx-sdk-release-public.su.bcebos.com/xdnn/dev") "https://klx-sdk-release-public.su.bcebos.com/xdnn/dev")
set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220601") set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220703")
else() else()
set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL}") set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL}")
endif() endif()
if(WITH_AARCH64) if(WITH_AARCH64)
set(XPU_XRE_DIR_NAME "xre-kylin_aarch64") set(XPU_XRE_DIR_NAME "xre-kylin_aarch64")
set(XPU_XDNN_DIR_NAME "XDNN-kylin_aarch64") set(XPU_XDNN_DIR_NAME "xdnn-kylin_aarch64")
set(XPU_XCCL_DIR_NAME "xccl-kylin_aarch64") set(XPU_XCCL_DIR_NAME "xccl-kylin_aarch64")
set(XPU_XDNN_URL set(XPU_XDNN_URL
"${XPU_XDNN_BASE_URL}/${XPU_XDNN_DIR_NAME}.tar.gz" "${XPU_XDNN_BASE_URL}/${XPU_XDNN_DIR_NAME}.tar.gz"
...@@ -40,7 +40,7 @@ elseif(WITH_SUNWAY) ...@@ -40,7 +40,7 @@ elseif(WITH_SUNWAY)
CACHE STRING "" FORCE) CACHE STRING "" FORCE)
elseif(WITH_BDCENTOS) elseif(WITH_BDCENTOS)
set(XPU_XRE_DIR_NAME "xre-bdcentos_x86_64") set(XPU_XRE_DIR_NAME "xre-bdcentos_x86_64")
set(XPU_XDNN_DIR_NAME "XDNN-bdcentos_x86_64") set(XPU_XDNN_DIR_NAME "xdnn-bdcentos_x86_64")
set(XPU_XCCL_DIR_NAME "xccl-bdcentos_x86_64") set(XPU_XCCL_DIR_NAME "xccl-bdcentos_x86_64")
# ubuntu and centos: use output by XDNN API team # ubuntu and centos: use output by XDNN API team
set(XPU_XDNN_URL set(XPU_XDNN_URL
...@@ -48,7 +48,7 @@ elseif(WITH_BDCENTOS) ...@@ -48,7 +48,7 @@ elseif(WITH_BDCENTOS)
CACHE STRING "" FORCE) CACHE STRING "" FORCE)
elseif(WITH_UBUNTU) elseif(WITH_UBUNTU)
set(XPU_XRE_DIR_NAME "xre-ubuntu_x86_64") set(XPU_XRE_DIR_NAME "xre-ubuntu_x86_64")
set(XPU_XDNN_DIR_NAME "XDNN-ubuntu_x86_64") set(XPU_XDNN_DIR_NAME "xdnn-ubuntu_x86_64")
set(XPU_XCCL_DIR_NAME "xccl-bdcentos_x86_64") set(XPU_XCCL_DIR_NAME "xccl-bdcentos_x86_64")
# ubuntu and centos: use output by XDNN API team # ubuntu and centos: use output by XDNN API team
set(XPU_XDNN_URL set(XPU_XDNN_URL
...@@ -56,7 +56,7 @@ elseif(WITH_UBUNTU) ...@@ -56,7 +56,7 @@ elseif(WITH_UBUNTU)
CACHE STRING "" FORCE) CACHE STRING "" FORCE)
elseif(WITH_CENTOS) elseif(WITH_CENTOS)
set(XPU_XRE_DIR_NAME "xre-centos7_x86_64") set(XPU_XRE_DIR_NAME "xre-centos7_x86_64")
set(XPU_XDNN_DIR_NAME "XDNN-bdcentos_x86_64") set(XPU_XDNN_DIR_NAME "xdnn-bdcentos_x86_64")
set(XPU_XCCL_DIR_NAME "xccl-bdcentos_x86_64") set(XPU_XCCL_DIR_NAME "xccl-bdcentos_x86_64")
# ubuntu and centos: use output by XDNN API team # ubuntu and centos: use output by XDNN API team
set(XPU_XDNN_URL set(XPU_XDNN_URL
...@@ -64,7 +64,7 @@ elseif(WITH_CENTOS) ...@@ -64,7 +64,7 @@ elseif(WITH_CENTOS)
CACHE STRING "" FORCE) CACHE STRING "" FORCE)
else() else()
set(XPU_XRE_DIR_NAME "xre-ubuntu_x86_64") set(XPU_XRE_DIR_NAME "xre-ubuntu_x86_64")
set(XPU_XDNN_DIR_NAME "XDNN-ubuntu_x86_64") set(XPU_XDNN_DIR_NAME "xdnn-ubuntu_x86_64")
set(XPU_XCCL_DIR_NAME "xccl-bdcentos_x86_64") set(XPU_XCCL_DIR_NAME "xccl-bdcentos_x86_64")
# default: use output by XDNN API team # default: use output by XDNN API team
set(XPU_XDNN_URL set(XPU_XDNN_URL
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy
import sys
sys.path.append("..")
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
import paddle
paddle.enable_static()
class XPUTestAssignValueOp(XPUOpTestWrapper):
    """Op-test wrapper for the XPU ``assign_value`` kernel.

    ``create_test_class`` instantiates the nested test classes once per
    dtype reported by ``get_xpu_op_support_types('assign_value')``.
    """

    def __init__(self):
        self.op_name = 'assign_value'
        self.use_dynamic_create_class = False

    class TestAssignValueOp(XPUOpTest):
        """Base case: float values fed through the ``fp32_values`` attr."""

        def init(self):
            # ``in_type`` is injected by create_test_class for each
            # supported dtype.
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.op_type = 'assign_value'

        def setUp(self):
            self.init()
            self.inputs = {}
            self.attrs = {}
            self.init_data()
            self.attrs["shape"] = self.value.shape
            self.attrs["dtype"] = framework.convert_np_dtype_to_dtype_(
                self.value.dtype)
            self.outputs = {"Out": self.value}

        def init_data(self):
            self.value = numpy.random.random(size=(2, 5)).astype(self.dtype)
            self.attrs["fp32_values"] = [float(v) for v in self.value.flat]

        def test_forward(self):
            self.check_output_with_place(self.place)

    class TestAssignValueOp2(TestAssignValueOp):
        """int32 values fed through the ``int32_values`` attr."""

        def init_data(self):
            self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32)
            self.attrs["int32_values"] = [int(v) for v in self.value.flat]

    class TestAssignValueOp3(TestAssignValueOp):
        """int64 values fed through the ``int64_values`` attr."""

        def init_data(self):
            self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64)
            self.attrs["int64_values"] = [int(v) for v in self.value.flat]

    class TestAssignValueOp4(TestAssignValueOp):
        """bool values fed through the ``bool_values`` attr."""

        def init_data(self):
            # numpy.bool is a deprecated alias removed in NumPy 1.24;
            # the builtin ``bool`` is the documented replacement and is
            # what numpy resolved the alias to anyway.
            self.value = numpy.random.choice(a=[False, True],
                                             size=(2, 5)).astype(bool)
            self.attrs["bool_values"] = [int(v) for v in self.value.flat]
class TestAssignApi(unittest.TestCase):
    """Functional test: ``layers.assign`` copies a numpy array into a
    static-graph tensor and the fetched result matches it exactly."""

    def setUp(self):
        self.init_dtype()
        # Random values spread over [-100, 100).
        self.value = (-100 + 200 * numpy.random.random(size=(2, 5))).astype(
            self.dtype)
        self.place = fluid.XPUPlace(0)

    def init_dtype(self):
        self.dtype = "float32"

    def test_assign(self):
        program = fluid.Program()
        with fluid.program_guard(program):
            target = layers.create_tensor(dtype=self.dtype)
            layers.assign(input=self.value, output=target)
        executor = fluid.Executor(self.place)
        [result] = executor.run(program, feed={}, fetch_list=[target])
        self.assertTrue(numpy.array_equal(result, self.value),
                        "fetch_x=%s val=%s" % (result, self.value))
        self.assertEqual(result.dtype, self.value.dtype)
class TestAssignApi2(TestAssignApi):
    """Same as TestAssignApi but assigns int32 values."""

    def init_dtype(self):
        self.dtype = "int32"
class TestAssignApi3(TestAssignApi):
    """Same as TestAssignApi but assigns int64 values."""

    def init_dtype(self):
        self.dtype = "int64"
class TestAssignApi4(TestAssignApi):
    """Same as TestAssignApi but assigns boolean values.

    Overrides ``setUp`` entirely because boolean data cannot be produced
    by scaling ``numpy.random.random`` output as the base class does.
    """

    def setUp(self):
        self.init_dtype()
        # numpy.bool is a deprecated alias removed in NumPy 1.24; the
        # builtin ``bool`` is the documented, behavior-identical
        # replacement.
        self.value = numpy.random.choice(a=[False, True],
                                         size=(2, 5)).astype(bool)
        self.place = fluid.XPUPlace(0)

    def init_dtype(self):
        self.dtype = "bool"
# Register one concrete unittest class per dtype the XPU
# assign_value kernel supports on the current device.
support_types = get_xpu_op_support_types('assign_value')
for stype in support_types:
    create_test_class(globals(), XPUTestAssignValueOp, stype)

if __name__ == '__main__':
    unittest.main()
...@@ -22,31 +22,44 @@ import unittest ...@@ -22,31 +22,44 @@ import unittest
import numpy as np import numpy as np
import numpy.random as random import numpy.random as random
import sys import sys
import math
from op_test import OpTest sys.path.append("..")
from op_test_xpu import XPUOpTest from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
import paddle import paddle
paddle.enable_static() paddle.enable_static()
class TestXPUIOUSimilarityOp(XPUOpTest): class XPUTestIOUSimilarityOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'iou_similarity'
self.use_dynamic_create_class = False
class TestXPUIOUSimilarityOp(XPUOpTest):
def init(self):
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.op_type = 'iou_similarity'
def test_check_output(self): def test_check_output(self):
if paddle.is_compiled_with_xpu(): self.check_output_with_place(self.place)
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def setUp(self): def setUp(self):
self.op_type = "iou_similarity" self.init()
self.boxes1 = random.rand(2, 4).astype('float32') self.boxes1 = random.rand(2, 4).astype(self.dtype)
self.boxes2 = random.rand(3, 4).astype('float32') self.boxes2 = random.rand(3, 4).astype(self.dtype)
self.output = random.rand(2, 3).astype('float32') self.output = random.rand(2, 3).astype(self.dtype)
self.box_normalized = False self.box_normalized = False
# run python iou computation # run python iou computation
self._compute_iou() self._compute_iou()
self.inputs = {'X': self.boxes1, 'Y': self.boxes2} self.inputs = {'X': self.boxes1, 'Y': self.boxes2}
self.attrs = {"box_normalized": self.box_normalized, 'use_xpu': True} self.attrs = {
"box_normalized": self.box_normalized,
'use_xpu': True
}
self.outputs = {'Out': self.output} self.outputs = {'Out': self.output}
def _compute_iou(self, ): def _compute_iou(self, ):
...@@ -77,44 +90,48 @@ class TestXPUIOUSimilarityOp(XPUOpTest): ...@@ -77,44 +90,48 @@ class TestXPUIOUSimilarityOp(XPUOpTest):
sim_score = inter_area / union_area sim_score = inter_area / union_area
self.output[row, col] = sim_score self.output[row, col] = sim_score
class TestXPUIOUSimilarityOpWithLoD(TestXPUIOUSimilarityOp):
class TestXPUIOUSimilarityOpWithLoD(TestXPUIOUSimilarityOp):
def test_check_output(self): def test_check_output(self):
if paddle.is_compiled_with_xpu(): self.check_output_with_place(self.place, check_dygraph=False)
place = paddle.XPUPlace(0)
self.check_output_with_place(place, check_dygraph=False)
def setUp(self): def setUp(self):
super(TestXPUIOUSimilarityOpWithLoD, self).setUp() super().setUp()
self.boxes1_lod = [[1, 1]] self.boxes1_lod = [[1, 1]]
self.output_lod = [[1, 1]] self.output_lod = [[1, 1]]
self.box_normalized = False self.box_normalized = False
# run python iou computation # run python iou computation
self._compute_iou() self._compute_iou()
self.inputs = {'X': (self.boxes1, self.boxes1_lod), 'Y': self.boxes2} self.inputs = {
'X': (self.boxes1, self.boxes1_lod),
'Y': self.boxes2
}
self.attrs = {"box_normalized": self.box_normalized} self.attrs = {"box_normalized": self.box_normalized}
self.outputs = {'Out': (self.output, self.output_lod)} self.outputs = {'Out': (self.output, self.output_lod)}
class TestXPUIOUSimilarityOpWithBoxNormalized(TestXPUIOUSimilarityOp):
class TestXPUIOUSimilarityOpWithBoxNormalized(TestXPUIOUSimilarityOp):
def test_check_output(self): def test_check_output(self):
if paddle.is_compiled_with_xpu(): self.check_output_with_place(self.place, check_dygraph=False)
place = paddle.XPUPlace(0)
self.check_output_with_place(place, check_dygraph=False)
def setUp(self): def setUp(self):
super(TestXPUIOUSimilarityOpWithBoxNormalized, self).setUp() super().setUp()
self.boxes1_lod = [[1, 1]] self.boxes1_lod = [[1, 1]]
self.output_lod = [[1, 1]] self.output_lod = [[1, 1]]
self.box_normalized = True self.box_normalized = True
# run python iou computation # run python iou computation
self._compute_iou() self._compute_iou()
self.inputs = {'X': (self.boxes1, self.boxes1_lod), 'Y': self.boxes2} self.inputs = {
'X': (self.boxes1, self.boxes1_lod),
'Y': self.boxes2
}
self.attrs = {"box_normalized": self.box_normalized} self.attrs = {"box_normalized": self.box_normalized}
self.outputs = {'Out': (self.output, self.output_lod)} self.outputs = {'Out': (self.output, self.output_lod)}
support_types = get_xpu_op_support_types('iou_similarity')
for stype in support_types:
create_test_class(globals(), XPUTestIOUSimilarityOp, stype)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -18,22 +18,31 @@ import unittest ...@@ -18,22 +18,31 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
import paddle.fluid as fluid
import sys import sys
sys.path.append("..") sys.path.append("..")
from op_test_xpu import XPUOpTest from op_test_xpu import XPUOpTest
import paddle.fluid as fluid from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
from paddle.fluid import Program, program_guard
import time
paddle.enable_static() paddle.enable_static()
class TestOneHotOp(XPUOpTest): class XPUTestOneHotOp(XPUOpTestWrapper):
def setUp(self): def __init__(self):
self.use_xpu = True self.op_name = 'one_hot_v2'
self.use_dynamic_create_class = False
class TestOneHotOp(XPUOpTest):
def init(self):
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.op_type = 'one_hot_v2' self.op_type = 'one_hot_v2'
def setUp(self):
self.init()
depth = 10 depth = 10
depth_np = np.array(10).astype('int32') depth_np = np.array(10).astype('int32')
# dimension = 12 # dimension = 12
...@@ -41,7 +50,8 @@ class TestOneHotOp(XPUOpTest): ...@@ -41,7 +50,8 @@ class TestOneHotOp(XPUOpTest):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])]) x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32') out = np.zeros(shape=(np.product(x.shape),
depth)).astype(self.dtype)
for i in range(np.product(x.shape)): for i in range(np.product(x.shape)):
out[i, x[i]] = 1.0 out[i, x[i]] = 1.0
...@@ -51,14 +61,12 @@ class TestOneHotOp(XPUOpTest): ...@@ -51,14 +61,12 @@ class TestOneHotOp(XPUOpTest):
self.outputs = {'Out': (out, x_lod)} self.outputs = {'Out': (out, x_lod)}
def test_check_output(self): def test_check_output(self):
place = paddle.XPUPlace(0) self.check_output_with_place(self.place)
self.check_output_with_place(place, check_dygraph=False)
class TestOneHotOp_attr(XPUOpTest): class TestOneHotOp_attr(TestOneHotOp):
def setUp(self): def setUp(self):
self.op_type = 'one_hot_v2' self.init()
depth = 10 depth = 10
dimension = 12 dimension = 12
x_lod = [[4, 1, 3, 3]] x_lod = [[4, 1, 3, 3]]
...@@ -66,24 +74,22 @@ class TestOneHotOp_attr(XPUOpTest): ...@@ -66,24 +74,22 @@ class TestOneHotOp_attr(XPUOpTest):
x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
out = np.zeros(shape=(np.product(x.shape[:-1]), 1, out = np.zeros(shape=(np.product(x.shape[:-1]), 1,
depth)).astype('float32') depth)).astype(self.dtype)
for i in range(np.product(x.shape)): for i in range(np.product(x.shape)):
out[i, 0, x[i]] = 1.0 out[i, 0, x[i]] = 1.0
self.inputs = {'X': (x, x_lod)} self.inputs = {'X': (x, x_lod)}
self.attrs = {'dtype': int(core.VarDesc.VarType.FP32), 'depth': depth} self.attrs = {
'dtype': int(core.VarDesc.VarType.FP32),
'depth': depth
}
self.outputs = {'Out': (out, x_lod)} self.outputs = {'Out': (out, x_lod)}
def test_check_output(self): class TestOneHotOp_default_dtype(TestOneHotOp):
place = paddle.XPUPlace(0)
self.check_output_with_place(place, check_dygraph=False)
class TestOneHotOp_default_dtype(XPUOpTest):
def setUp(self): def setUp(self):
self.op_type = 'one_hot_v2' self.init()
depth = 10 depth = 10
depth_np = np.array(10).astype('int32') depth_np = np.array(10).astype('int32')
dimension = 12 dimension = 12
...@@ -91,7 +97,8 @@ class TestOneHotOp_default_dtype(XPUOpTest): ...@@ -91,7 +97,8 @@ class TestOneHotOp_default_dtype(XPUOpTest):
x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])]) x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32') out = np.zeros(shape=(np.product(x.shape),
depth)).astype(self.dtype)
for i in range(np.product(x.shape)): for i in range(np.product(x.shape)):
out[i, x[i]] = 1.0 out[i, x[i]] = 1.0
...@@ -100,15 +107,10 @@ class TestOneHotOp_default_dtype(XPUOpTest): ...@@ -100,15 +107,10 @@ class TestOneHotOp_default_dtype(XPUOpTest):
self.attrs = {} self.attrs = {}
self.outputs = {'Out': (out, x_lod)} self.outputs = {'Out': (out, x_lod)}
def test_check_output(self): class TestOneHotOp_default_dtype_attr(TestOneHotOp):
place = paddle.XPUPlace(0)
self.check_output_with_place(place, check_dygraph=False)
class TestOneHotOp_default_dtype_attr(XPUOpTest):
def setUp(self): def setUp(self):
self.op_type = 'one_hot_v2' self.init()
depth = 10 depth = 10
dimension = 12 dimension = 12
x_lod = [[4, 1, 3, 3]] x_lod = [[4, 1, 3, 3]]
...@@ -116,7 +118,7 @@ class TestOneHotOp_default_dtype_attr(XPUOpTest): ...@@ -116,7 +118,7 @@ class TestOneHotOp_default_dtype_attr(XPUOpTest):
x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])
out = np.zeros(shape=(np.product(x.shape[:-1]), 1, out = np.zeros(shape=(np.product(x.shape[:-1]), 1,
depth)).astype('float32') depth)).astype(self.dtype)
for i in range(np.product(x.shape)): for i in range(np.product(x.shape)):
out[i, 0, x[i]] = 1.0 out[i, 0, x[i]] = 1.0
...@@ -125,30 +127,22 @@ class TestOneHotOp_default_dtype_attr(XPUOpTest): ...@@ -125,30 +127,22 @@ class TestOneHotOp_default_dtype_attr(XPUOpTest):
self.attrs = {'depth': depth} self.attrs = {'depth': depth}
self.outputs = {'Out': (out, x_lod)} self.outputs = {'Out': (out, x_lod)}
def test_check_output(self): class TestOneHotOp_out_of_range(TestOneHotOp):
place = paddle.XPUPlace(0)
self.check_output_with_place(place, check_dygraph=False)
class TestOneHotOp_out_of_range(XPUOpTest):
def setUp(self): def setUp(self):
self.op_type = 'one_hot_v2' self.init()
depth = 10 depth = 10
x_lod = [[4, 1, 3, 3]] x_lod = [[4, 1, 3, 3]]
x = [np.random.choice([-1, depth]) for i in range(sum(x_lod[0]))] x = [np.random.choice([-1, depth]) for i in range(sum(x_lod[0]))]
x = np.array(x).astype('int32').reshape([sum(x_lod[0])]) x = np.array(x).astype('int32').reshape([sum(x_lod[0])])
out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32') out = np.zeros(shape=(np.product(x.shape),
depth)).astype(self.dtype)
self.inputs = {'X': (x, x_lod)} self.inputs = {'X': (x, x_lod)}
self.attrs = {'depth': depth, 'allow_out_of_range': True} self.attrs = {'depth': depth, 'allow_out_of_range': True}
self.outputs = {'Out': (out, x_lod)} self.outputs = {'Out': (out, x_lod)}
def test_check_output(self):
place = paddle.XPUPlace(0)
self.check_output_with_place(place, check_dygraph=False)
class TestOneHotOpApi(unittest.TestCase): class TestOneHotOpApi(unittest.TestCase):
...@@ -200,6 +194,9 @@ class BadInputTestOnehotV2(unittest.TestCase): ...@@ -200,6 +194,9 @@ class BadInputTestOnehotV2(unittest.TestCase):
self.assertRaises(TypeError, test_bad_x) self.assertRaises(TypeError, test_bad_x)
support_types = get_xpu_op_support_types('one_hot_v2')
for stype in support_types:
create_test_class(globals(), XPUTestOneHotOp, stype)
if __name__ == '__main__': if __name__ == '__main__':
paddle.enable_static()
unittest.main() unittest.main()
...@@ -19,174 +19,153 @@ import numpy as np ...@@ -19,174 +19,153 @@ import numpy as np
import sys import sys
sys.path.append("..") sys.path.append("..")
from op_test import OpTest, skip_check_grad_ci from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
import paddle import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
paddle.enable_static()
class TestMeanOp(OpTest):
class XPUTestMeanOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'reduce_mean'
self.use_dynamic_create_class = False
class TestMeanOp(XPUOpTest):
def setUp(self): def setUp(self):
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.op_type = "reduce_mean" self.op_type = "reduce_mean"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
self.attrs = {'use_xpu': True} self.attrs = {'use_xpu': True}
self.outputs = {'Out': self.inputs['X'].mean(axis=0)} self.outputs = {'Out': self.inputs['X'].mean(axis=0)}
def test_check_output(self): def test_check_output(self):
if paddle.is_compiled_with_xpu(): self.check_output_with_place(self.place)
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def check_grad_(self):
self.check_grad(['X'], 'Out')
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
class TestMeanOp5D(OpTest): class TestMeanOp5D(TestMeanOp):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.inputs = { self.inputs = {
'X': np.random.random((1, 2, 5, 6, 10)).astype("float32") 'X': np.random.random((1, 2, 5, 6, 10)).astype(self.dtype)
} }
self.attrs = {'use_xpu': True} self.attrs = {'use_xpu': True}
self.outputs = {'Out': self.inputs['X'].mean(axis=0)} self.outputs = {'Out': self.inputs['X'].mean(axis=0)}
def test_check_output(self): class TestMeanOp6D(TestMeanOp):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestMeanOp6D(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.inputs = { self.inputs = {
'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float32") 'X': np.random.random((1, 1, 2, 5, 6, 10)).astype(self.dtype)
} }
self.attrs = {'use_xpu': True} self.attrs = {'use_xpu': True}
self.outputs = {'Out': self.inputs['X'].mean(axis=0)} self.outputs = {'Out': self.inputs['X'].mean(axis=0)}
def test_check_output(self): class TestMeanOp8D(TestMeanOp):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestMeanOp8D(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.inputs = { self.inputs = {
'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float32") 'X': np.random.random(
(1, 3, 1, 2, 1, 4, 3, 10)).astype(self.dtype)
} }
self.attrs = {'dim': (0, 3), 'use_xpu': True} self.attrs = {'dim': (0, 3), 'use_xpu': True}
self.outputs = {'Out': self.inputs['X'].mean(axis=(0, 3))} self.outputs = {'Out': self.inputs['X'].mean(axis=(0, 3))}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self): class XPUTestReduce(XPUOpTestWrapper):
self.check_grad(['X'], 'Out')
def __init__(self):
self.op_name = 'reduce_mean'
self.use_dynamic_create_class = False
class Test1DReduce(OpTest): class Test1DReduce(XPUOpTest):
def setUp(self): def setUp(self):
self.dtype = self.in_type
self.place = paddle.XPUPlace(0)
self.op_type = "reduce_mean" self.op_type = "reduce_mean"
self.inputs = {'X': np.random.random(120).astype("float32")} self.inputs = {'X': np.random.random(120).astype(self.dtype)}
self.attrs = {'use_xpu': True} self.attrs = {'use_xpu': True}
self.outputs = {'Out': self.inputs['X'].mean(axis=0)} self.outputs = {'Out': self.inputs['X'].mean(axis=0)}
def test_check_output(self): def test_check_output(self):
if paddle.is_compiled_with_xpu(): self.check_output_with_place(self.place)
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
# There is a api bug in checking grad when dim[0] > 0
# def test_check_grad(self):
# self.check_output_with_place(self.place, ['X'], 'Out')
class Test2DReduce0(Test1DReduce): class Test2DReduce0(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.attrs = {'dim': [0], 'use_xpu': True} self.attrs = {'dim': [0], 'use_xpu': True}
self.inputs = {'X': np.random.random((20, 10)).astype("float32")} self.inputs = {'X': np.random.random((20, 10)).astype(self.dtype)}
self.outputs = {'Out': self.inputs['X'].mean(axis=0)} self.outputs = {'Out': self.inputs['X'].mean(axis=0)}
class Test2DReduce1(Test1DReduce):
class Test2DReduce1(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.attrs = {'dim': [1], 'use_xpu': True} self.attrs = {'dim': [1], 'use_xpu': True}
self.inputs = {'X': np.random.random((20, 10)).astype("float32")} self.inputs = {'X': np.random.random((20, 10)).astype(self.dtype)}
self.outputs = { self.outputs = {
'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim'])) 'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim']))
} }
class Test3DReduce0(Test1DReduce):
class Test3DReduce0(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.attrs = {'dim': [1], 'use_xpu': True} self.attrs = {'dim': [1], 'use_xpu': True}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float32")} self.inputs = {'X': np.random.random((5, 6, 7)).astype(self.dtype)}
self.outputs = { self.outputs = {
'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim'])) 'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim']))
} }
class Test3DReduce1(Test1DReduce):
class Test3DReduce1(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.attrs = {'dim': [2], 'use_xpu': True} self.attrs = {'dim': [2], 'use_xpu': True}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float32")} self.inputs = {'X': np.random.random((5, 6, 7)).astype(self.dtype)}
self.outputs = { self.outputs = {
'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim'])) 'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim']))
} }
class Test3DReduce2(Test1DReduce):
class Test3DReduce2(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.attrs = {'dim': [-2], 'use_xpu': True} self.attrs = {'dim': [-2], 'use_xpu': True}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float32")} self.inputs = {'X': np.random.random((5, 6, 7)).astype(self.dtype)}
self.outputs = { self.outputs = {
'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim'])) 'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim']))
} }
class Test3DReduce3(Test1DReduce):
class Test3DReduce3(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.attrs = {'dim': [1, 2], 'use_xpu': True} self.attrs = {'dim': [1, 2], 'use_xpu': True}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float32")} self.inputs = {'X': np.random.random((5, 6, 7)).astype(self.dtype)}
self.outputs = { self.outputs = {
'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim'])) 'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim']))
} }
class TestKeepDimReduce(Test1DReduce):
class TestKeepDimReduce(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
self.attrs = {'dim': [1], 'keep_dim': True, 'use_xpu': True} self.attrs = {'dim': [1], 'keep_dim': True, 'use_xpu': True}
self.outputs = { self.outputs = {
'Out': 'Out':
...@@ -194,13 +173,13 @@ class TestKeepDimReduce(Test1DReduce): ...@@ -194,13 +173,13 @@ class TestKeepDimReduce(Test1DReduce):
keepdims=self.attrs['keep_dim']) keepdims=self.attrs['keep_dim'])
} }
class TestKeepDim8DReduce(Test1DReduce):
class TestKeepDim8DReduce(Test1DReduce):
def setUp(self): def setUp(self):
self.op_type = "reduce_mean" super().setUp()
self.inputs = { self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float32") 'X': np.random.random(
(2, 5, 3, 2, 2, 3, 4, 2)).astype(self.dtype)
} }
self.attrs = {'dim': (3, 4, 5), 'keep_dim': True, 'use_xpu': True} self.attrs = {'dim': (3, 4, 5), 'keep_dim': True, 'use_xpu': True}
self.outputs = { self.outputs = {
...@@ -210,5 +189,10 @@ class TestKeepDim8DReduce(Test1DReduce): ...@@ -210,5 +189,10 @@ class TestKeepDim8DReduce(Test1DReduce):
} }
support_types = get_xpu_op_support_types('reduce_mean')
for stype in support_types:
create_test_class(globals(), XPUTestMeanOp, stype)
create_test_class(globals(), XPUTestReduce, stype)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -20,14 +20,20 @@ import unittest ...@@ -20,14 +20,20 @@ import unittest
import math import math
import numpy as np import numpy as np
import paddle.fluid.core as core import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest from op_test_xpu import XPUOpTest
import paddle import paddle
import paddle.fluid as fluid from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
from paddle.fluid import Program, program_guard
paddle.enable_static()
class TestROIAlignOp(XPUOpTest):
class XPUTestROIAlignOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'roi_align'
self.use_dynamic_create_class = False
class TestROIAlignOp(XPUOpTest):
def set_data(self): def set_data(self):
self.init_test_case() self.init_test_case()
...@@ -57,7 +63,8 @@ class TestROIAlignOp(XPUOpTest): ...@@ -57,7 +63,8 @@ class TestROIAlignOp(XPUOpTest):
self.xpu_version = core.get_xpu_device_version(0) self.xpu_version = core.get_xpu_device_version(0)
# n, c, h, w # n, c, h, w
self.x_dim = (self.batch_size, self.channels, self.height, self.width) self.x_dim = (self.batch_size, self.channels, self.height,
self.width)
self.spatial_scale = 1.0 / 2.0 self.spatial_scale = 1.0 / 2.0
self.pooled_height = 2 self.pooled_height = 2
...@@ -67,16 +74,16 @@ class TestROIAlignOp(XPUOpTest): ...@@ -67,16 +74,16 @@ class TestROIAlignOp(XPUOpTest):
self.continuous_coordinate = False self.continuous_coordinate = False
else: else:
self.continuous_coordinate = bool(np.random.randint(2)) self.continuous_coordinate = bool(np.random.randint(2))
self.x = np.random.random(self.x_dim).astype('float32') self.x = np.random.random(self.x_dim).astype(self.dtype)
def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h, roi_bin_grid_w, def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h,
bin_size_h, bin_size_w): roi_bin_grid_w, bin_size_h, bin_size_w):
count = roi_bin_grid_h * roi_bin_grid_w count = roi_bin_grid_h * roi_bin_grid_w
bilinear_pos = np.zeros( bilinear_pos = np.zeros([
[self.channels, self.pooled_height, self.pooled_width, count, 4], self.channels, self.pooled_height, self.pooled_width, count, 4
np.float32) ], np.float32)
bilinear_w = np.zeros([self.pooled_height, self.pooled_width, count, 4], bilinear_w = np.zeros(
np.float32) [self.pooled_height, self.pooled_width, count, 4], np.float32)
for ph in range(self.pooled_width): for ph in range(self.pooled_width):
for pw in range(self.pooled_height): for pw in range(self.pooled_height):
c = 0 c = 0
...@@ -126,7 +133,7 @@ class TestROIAlignOp(XPUOpTest): ...@@ -126,7 +133,7 @@ class TestROIAlignOp(XPUOpTest):
def calc_roi_align(self): def calc_roi_align(self):
self.out_data = np.zeros( self.out_data = np.zeros(
(self.rois_num, self.channels, self.pooled_height, (self.rois_num, self.channels, self.pooled_height,
self.pooled_width)).astype('float32') self.pooled_width)).astype(self.dtype)
for i in range(self.rois_num): for i in range(self.rois_num):
roi = self.rois[i] roi = self.rois[i]
...@@ -150,10 +157,9 @@ class TestROIAlignOp(XPUOpTest): ...@@ -150,10 +157,9 @@ class TestROIAlignOp(XPUOpTest):
math.ceil(roi_width / self.pooled_width) math.ceil(roi_width / self.pooled_width)
count = int(roi_bin_grid_h * roi_bin_grid_w) count = int(roi_bin_grid_h * roi_bin_grid_w)
pre_size = count * self.pooled_width * self.pooled_height pre_size = count * self.pooled_width * self.pooled_height
bilinear_pos, bilinear_w = self.pre_calc(x_i, roi_xmin, roi_ymin, bilinear_pos, bilinear_w = self.pre_calc(
int(roi_bin_grid_h), x_i, roi_xmin, roi_ymin, int(roi_bin_grid_h),
int(roi_bin_grid_w), int(roi_bin_grid_w), bin_size_h, bin_size_w)
bin_size_h, bin_size_w)
for ch in range(self.channels): for ch in range(self.channels):
align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1) align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1)
output_val = align_per_bin.mean(axis=-1) output_val = align_per_bin.mean(axis=-1)
...@@ -168,36 +174,38 @@ class TestROIAlignOp(XPUOpTest): ...@@ -168,36 +174,38 @@ class TestROIAlignOp(XPUOpTest):
x1 = np.random.random_integers( x1 = np.random.random_integers(
0, self.width // self.spatial_scale - self.pooled_width) 0, self.width // self.spatial_scale - self.pooled_width)
y1 = np.random.random_integers( y1 = np.random.random_integers(
0, self.height // self.spatial_scale - self.pooled_height) 0,
self.height // self.spatial_scale - self.pooled_height)
x2 = np.random.random_integers(x1 + self.pooled_width, x2 = np.random.random_integers(
x1 + self.pooled_width,
self.width // self.spatial_scale) self.width // self.spatial_scale)
y2 = np.random.random_integers( y2 = np.random.random_integers(
y1 + self.pooled_height, self.height // self.spatial_scale) y1 + self.pooled_height,
self.height // self.spatial_scale)
roi = [bno, x1, y1, x2, y2] roi = [bno, x1, y1, x2, y2]
rois.append(roi) rois.append(roi)
self.rois_num = len(rois) self.rois_num = len(rois)
self.rois = np.array(rois).astype("float32") self.rois = np.array(rois).astype(self.dtype)
def setUp(self): def setUp(self):
self.set_xpu()
self.op_type = "roi_align" self.op_type = "roi_align"
self.place = paddle.XPUPlace(0)
self.dtype = self.in_type
self.set_data() self.set_data()
def set_xpu(self):
self.__class__.use_xpu = True
def test_check_output(self): def test_check_output(self):
if paddle.is_compiled_with_xpu(): self.check_output_with_place(self.place)
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self): def test_check_grad(self):
if core.is_compiled_with_xpu(): self.check_grad_with_place(self.place, {'X'}, 'Out')
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, {'X'}, 'Out')
class TestROIAlignInLodOp(TestROIAlignOp): class TestROIAlignInLodOp(TestROIAlignOp):
def set_data(self): def set_data(self):
self.init_test_case() self.init_test_case()
...@@ -223,5 +231,9 @@ class TestROIAlignInLodOp(TestROIAlignOp): ...@@ -223,5 +231,9 @@ class TestROIAlignInLodOp(TestROIAlignOp):
self.outputs = {'Out': self.out_data} self.outputs = {'Out': self.out_data}
support_types = get_xpu_op_support_types('roi_align')
for stype in support_types:
create_test_class(globals(), XPUTestROIAlignOp, stype)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册