Unverified commit e07420b9 authored by z8hanghuan, committed by GitHub

new way of test cases, *test=kunlun (#39444)

* new way of test cases, *test=kunlun

* new way of test cases, *test=kunlun

* new way of test cases, *test=kunlun
Parent d0df5632
@@ -24,23 +24,41 @@ from op_test_xpu import OpTest, XPUOpTest
import paddle
from paddle.fluid import Program, program_guard
import op_test
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
class TestClipOp(XPUOpTest):
def set_xpu(self):
self.__class__.use_xpu = True
self.place = paddle.XPUPlace(0)
class XPUTestClipOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'clip'
self.use_dynamic_create_class = False
class TestClipOp(XPUOpTest):
def setUp(self):
self.init_dtype()
self.set_xpu()
self.max_relative_error = 0.006
self.op_type = "clip"
self.place = paddle.XPUPlace(0)
self.inputs = {}
self.initTestCase()
self.init_data()
self.set_attrs()
self.set_inputs()
self.outputs = {
'Out': np.clip(self.inputs['X'], self.min_v, self.max_v)
}
self.op_type = "clip"
self.attrs = {}
self.attrs['min'] = self.min
self.attrs['max'] = self.max
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.no_need_check_grad = True
self.__class__.op_type = self.dtype
def init_data(self):
self.shape = (4, 10, 10)
self.max = 0.8
self.min = 0.3
def set_inputs(self):
if 'Min' in self.inputs:
min_v = self.inputs['Min']
else:
@@ -51,62 +69,55 @@ class TestClipOp(XPUOpTest):
else:
max_v = self.attrs['max']
self.min_v = min_v
self.max_v = max_v
self.max_relative_error = 0.006
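# Values within max_relative_error of either clip boundary are nudged to
# 0.5 below, presumably so the numerical gradient check never straddles
# the non-differentiable kink at min/max.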
input = np.random.random(self.shape).astype("float32")
input[np.abs(input - min_v) < self.max_relative_error] = 0.5
input[np.abs(input - max_v) < self.max_relative_error] = 0.5
self.inputs['X'] = input
self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}
def set_attrs(self):
self.attrs = {}
self.attrs['min'] = self.min
self.attrs['max'] = self.max
def init_dtype(self):
self.dtype = self.in_type
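# in_type is presumably attached to the generated class by
# create_test_class (see the registration loop at the end of this file).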
def test_check_output(self):
paddle.enable_static()
self.check_output_with_place(self.place)
paddle.disable_static()
def test_check_grad_normal(self):
paddle.enable_static()
self.check_grad_with_place(self.place, ['X'], 'Out')
paddle.disable_static()
def initTestCase(self):
self.shape = (4, 10, 10)
self.max = 0.8
self.min = 0.3
self.inputs['Max'] = np.array([0.8]).astype('float32')
self.inputs['Min'] = np.array([0.1]).astype('float32')
class TestCase1(TestClipOp):
def initTestCase(self):
class TestClipOp1(TestClipOp):
def init_data(self):
self.shape = (8, 16, 8)
self.max = 0.7
self.min = 0.0
class TestCase2(TestClipOp):
def initTestCase(self):
class TestClipOp2(TestClipOp):
def init_data(self):
self.shape = (8, 16)
self.max = 1.0
self.min = 0.0
class TestCase3(TestClipOp):
def initTestCase(self):
class TestClipOp3(TestClipOp):
def init_data(self):
self.shape = (4, 8, 16)
self.max = 0.7
self.min = 0.2
class TestCase4(TestClipOp):
def initTestCase(self):
class TestClipOp4(TestClipOp):
def init_data(self):
self.shape = (4, 8, 8)
self.max = 0.7
self.min = 0.2
self.inputs['Max'] = np.array([0.8]).astype('float32')
self.inputs['Min'] = np.array([0.3]).astype('float32')
class TestCase5(TestClipOp):
def initTestCase(self):
class TestClipOp5(TestClipOp):
def init_data(self):
self.shape = (4, 8, 16)
self.max = 0.5
self.min = 0.5
@@ -212,5 +223,9 @@ class TestInplaceClipAPI(TestClipAPI):
return x.clip_(min, max)
support_types = get_xpu_op_support_types('clip')
for stype in support_types:
create_test_class(globals(), XPUTestClipOp, stype)
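# A sketch of the effect (names are illustrative): for stype == 'float32',
# create_test_class registers a TestClipOp subclass with in_type set to
# 'float32' in globals(), so unittest discovers one clip suite per dtype.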
if __name__ == '__main__':
unittest.main()
@@ -18,54 +18,78 @@ import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test_xpu import XPUOpTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle
from paddle.static import Program, program_guard
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import compiler, Program, program_guard
import op_test
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
class XPUTestScaleOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'scale'
self.use_dynamic_create_class = False
class TestXPUScaleOp(XPUOpTest):
class TestScaleOp(XPUOpTest):
def setUp(self):
self.init_dtype()
self.set_xpu()
self.op_type = "scale"
self.init_type()
self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
self.attrs = {'scale': -2.3, 'use_xpu': True}
self.place = paddle.XPUPlace(0)
self.set_inputs()
self.set_attrs()
self.outputs = {
'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
}
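# Casting 'scale' to the input dtype is significant for integer dtypes:
# np.int64(-2.3) == -2, so for int64 runs the reference output is built
# from the truncated scale factor.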
def init_type(self):
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.no_need_check_grad = True
self.__class__.op_type = self.dtype
def set_inputs(self):
self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
def init_dtype(self):
if "float16" == self.in_type:
self.dtype = np.float16
if "float32" == self.in_type:
self.dtype = np.float32
if "int64" == self.in_type:
self.dtype = np.int64
def set_attrs(self):
self.attrs = {'scale': -2.3}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
class TestScaleOp1(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': 3.5}
# class TestXPUScaleOpInt64(TestXPUScaleOp):
# def init_type(self):
# self.dtype = np.int64
class TestScaleOp2(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': 6.77}
class TestScaleOp3(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': -9.19}
class TestScaleFp16Op(TestXPUScaleOp):
def init_dtype_type(self):
self.dtype = np.float16
class TestScaleOp4(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': 0.0}
def test_check_output(self):
place = core.XPUPlace(0)
self.check_output_with_place(place, atol=0.002)
def test_check_grad(self):
place = core.XPUPlace(0)
self.check_grad_with_place(place, ["X"], "Out", max_relative_error=0.05)
class TestScaleOp5(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': -0.003}
class TestScaleApiStatic(unittest.TestCase):
@@ -108,5 +132,9 @@ class TestScaleInplaceApiDygraph(TestScaleApiDygraph):
return x.scale_(scale, bias)
support_types = get_xpu_op_support_types('scale')
for stype in support_types:
create_test_class(globals(), XPUTestScaleOp, stype)
if __name__ == "__main__":
unittest.main()
@@ -19,28 +19,49 @@ import numpy as np
import sys
sys.path.append("..")
from op_test_xpu import OpTest, XPUOpTest
from op_test import skip_check_grad_ci
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
from paddle.fluid import compiler, Program, program_guard, core
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
from scipy.special import logit
from scipy.special import expit
paddle.enable_static()
class TestSigmoidCrossEntropyWithLogitsOp1(XPUOpTest):
class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper):
"""Test sigmoid_cross_entropy_with_logit_op with binary label
"""
def __init__(self):
self.op_name = "sigmoid_cross_entropy_with_logits"
self.use_dynamic_create_class = False
class TestSigmoidCrossEntropyWithLogitsOp(XPUOpTest):
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.op_type = "sigmoid_cross_entropy_with_logits"
self.place = paddle.XPUPlace(0)
self.init_dtype()
self.set_inputs()
self.init_dtype()
self.set_output()
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
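# Sanity check of the formula above: for x = 0 and label = 1,
# sigmoid(0) = 0.5 and the loss is -1*log(0.5) - 0*log(0.5) = log(2) ≈ 0.6931.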
def set_inputs(self):
batch_size = 64
num_classes = 20
self.inputs = {
@@ -50,14 +71,7 @@ class TestSigmoidCrossEntropyWithLogitsOp1(XPUOpTest):
'Label': np.random.randint(0, 2, (batch_size, num_classes))
.astype(self.dtype)
}
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
def test_check_output(self):
self.check_output_with_place(self.place)
@@ -67,25 +81,22 @@ class TestSigmoidCrossEntropyWithLogitsOp1(XPUOpTest):
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.op_type = self.in_type
self.place = paddle.XPUPlace(0)
def init_dtype(self):
self.dtype = np.float32
self.dtype = self.in_type
class TestSigmoidCrossEntropyWithLogitsOp2(
TestSigmoidCrossEntropyWithLogitsOp1):
class TestSigmoidCrossEntropyWithLogitsOp2(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
def set_inputs(self):
batch_size = 64
num_classes = 20
ignore_index = -1
self.ignore_index = ignore_index
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
@@ -93,8 +104,9 @@ class TestSigmoidCrossEntropyWithLogitsOp2(
'Label': np.random.randint(-1, 2, (batch_size, num_classes))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index, }
self.attrs = {'ignore_index': ignore_index}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
@@ -102,20 +114,15 @@ class TestSigmoidCrossEntropyWithLogitsOp2(
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == ignore_index)] = 0
out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
self.outputs = {'Out': out}
class TestSigmoidCrossEntropyWithLogitsOp3(
TestSigmoidCrossEntropyWithLogitsOp1):
class TestSigmoidCrossEntropyWithLogitsOp3(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
def set_inputs(self):
batch_size = 64
num_classes = 20
self.inputs = {
@@ -125,7 +132,9 @@ class TestSigmoidCrossEntropyWithLogitsOp3(
'Label': np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)
}
self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
@@ -134,20 +143,16 @@ class TestSigmoidCrossEntropyWithLogitsOp3(
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
class TestSigmoidCrossEntropyWithLogitsOp4(
TestSigmoidCrossEntropyWithLogitsOp1):
class TestSigmoidCrossEntropyWithLogitsOp4(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
def set_inputs(self):
batch_size = 64
num_classes = 20
ignore_index = -1
self.ignore_index = ignore_index
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
@@ -157,6 +162,7 @@ class TestSigmoidCrossEntropyWithLogitsOp4(
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
@@ -164,33 +170,31 @@ class TestSigmoidCrossEntropyWithLogitsOp4(
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == ignore_index)] = 0
out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
if self.attrs['normalize']:
out = out / float(
np.where(self.inputs['Label'] != ignore_index)[0].size)
np.where(self.inputs['Label'] != self.ignore_index)[0].size)
self.outputs = {'Out': out}
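# With 'normalize' on, the divisor is the count of non-ignored labels;
# for illustration (hypothetical numbers): a 64x20 label matrix with 400
# entries equal to ignore_index gives 64 * 20 - 400 = 880 as the denominator.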
class TestSigmoidCrossEntropyWithLogitsOp5(
TestSigmoidCrossEntropyWithLogitsOp1):
class TestSigmoidCrossEntropyWithLogitsOp5(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
def set_inputs(self):
batch_size = [10, 10]
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label': np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
'Label':
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
@@ -199,71 +203,71 @@ class TestSigmoidCrossEntropyWithLogitsOp5(
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
class TestSigmoidCrossEntropyWithLogitsNorm(
TestSigmoidCrossEntropyWithLogitsOp1):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
class TestSigmoidCrossEntropyWithLogitsOp6(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with binary label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
def set_inputs(self):
batch_size = [10, 10]
num_classes = 20
ignore_index = -1
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label': np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
'Label':
np.random.randint(0, 2, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == ignore_index)] = 0
if self.attrs['normalize']:
out = out / float(
np.where(self.inputs['Label'] != ignore_index)[0].size)
self.outputs = {'Out': out}
self.outputs = {'Out': -term1 - term2}
class TestSigmoidCrossEntropyWithLogitsOp6(
TestSigmoidCrossEntropyWithLogitsOp1):
"""Test sigmoid_cross_entropy_with_logit_op with binary label
class TestSigmoidCrossEntropyWithLogitsNorm(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
def set_inputs(self):
batch_size = [10, 10]
num_classes = 20
ignore_index = -1
self.ignore_index = ignore_index
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label': np.random.randint(0, 2, tuple(batch_size + [num_classes]))
'Label':
np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
out = -term1 - term2
out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
if self.attrs['normalize']:
out = out / float(
np.where(self.inputs['Label'] != self.ignore_index)[0].size)
self.outputs = {'Out': out}
support_types = get_xpu_op_support_types('sigmoid_cross_entropy_with_logits')
for stype in support_types:
create_test_class(globals(), XPUTestSigmoidCrossEntropyWithLogitsOp, stype)
if __name__ == '__main__':
unittest.main()