Unverified commit e07420b9, authored by z8hanghuan, committed by GitHub

new way of test cases, *test=kunlun (#39444)

* new way of test cases, *test=kunlun

* new way of test cases, *test=kunlun

* new way of test cases, *test=kunlun
Parent d0df5632
@@ -24,23 +24,41 @@ from op_test_xpu import OpTest, XPUOpTest
 import paddle
 from paddle.fluid import Program, program_guard
+import op_test
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
-class TestClipOp(XPUOpTest):
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-        self.place = paddle.XPUPlace(0)
+
+class XPUTestClipOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'clip'
+        self.use_dynamic_create_class = False
+
+    class TestClipOp(XPUOpTest):
         def setUp(self):
+            self.init_dtype()
             self.set_xpu()
-            self.max_relative_error = 0.006
+            self.op_type = "clip"
+            self.place = paddle.XPUPlace(0)
             self.inputs = {}
-            self.initTestCase()
-            self.op_type = "clip"
-            self.attrs = {}
-            self.attrs['min'] = self.min
-            self.attrs['max'] = self.max
+            self.init_data()
+            self.set_attrs()
+            self.set_inputs()
+            self.outputs = {
+                'Out': np.clip(self.inputs['X'], self.min_v, self.max_v)
+            }
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+            self.__class__.op_type = self.dtype
+
+        def init_data(self):
+            self.shape = (4, 10, 10)
+            self.max = 0.8
+            self.min = 0.3
+
+        def set_inputs(self):
             if 'Min' in self.inputs:
                 min_v = self.inputs['Min']
             else:
@@ -51,62 +69,55 @@ class TestClipOp(XPUOpTest):
             else:
                 max_v = self.attrs['max']
 
+            self.min_v = min_v
+            self.max_v = max_v
+            self.max_relative_error = 0.006
             input = np.random.random(self.shape).astype("float32")
             input[np.abs(input - min_v) < self.max_relative_error] = 0.5
             input[np.abs(input - max_v) < self.max_relative_error] = 0.5
             self.inputs['X'] = input
-            self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}
+
+        def set_attrs(self):
+            self.attrs = {}
+            self.attrs['min'] = self.min
+            self.attrs['max'] = self.max
+
+        def init_dtype(self):
+            self.dtype = self.in_type
 
         def test_check_output(self):
             paddle.enable_static()
             self.check_output_with_place(self.place)
             paddle.disable_static()
 
-    def test_check_grad_normal(self):
-        paddle.enable_static()
-        self.check_grad_with_place(self.place, ['X'], 'Out')
-        paddle.disable_static()
-
-    def initTestCase(self):
-        self.shape = (4, 10, 10)
-        self.max = 0.8
-        self.min = 0.3
-        self.inputs['Max'] = np.array([0.8]).astype('float32')
-        self.inputs['Min'] = np.array([0.1]).astype('float32')
-
-class TestCase1(TestClipOp):
-    def initTestCase(self):
+    class TestClipOp1(TestClipOp):
+        def init_data(self):
             self.shape = (8, 16, 8)
             self.max = 0.7
             self.min = 0.0
 
-class TestCase2(TestClipOp):
-    def initTestCase(self):
+    class TestClipOp2(TestClipOp):
+        def init_data(self):
             self.shape = (8, 16)
             self.max = 1.0
             self.min = 0.0
 
-class TestCase3(TestClipOp):
-    def initTestCase(self):
+    class TestClipOp3(TestClipOp):
+        def init_data(self):
             self.shape = (4, 8, 16)
             self.max = 0.7
             self.min = 0.2
 
-class TestCase4(TestClipOp):
-    def initTestCase(self):
+    class TestClipOp4(TestClipOp):
+        def init_data(self):
             self.shape = (4, 8, 8)
             self.max = 0.7
             self.min = 0.2
             self.inputs['Max'] = np.array([0.8]).astype('float32')
             self.inputs['Min'] = np.array([0.3]).astype('float32')
 
-class TestCase5(TestClipOp):
-    def initTestCase(self):
+    class TestClipOp5(TestClipOp):
        def init_data(self):
             self.shape = (4, 8, 16)
             self.max = 0.5
             self.min = 0.5
 
@@ -212,5 +223,9 @@ class TestInplaceClipAPI(TestClipAPI):
         return x.clip_(min, max)
 
+
+support_types = get_xpu_op_support_types('clip')
+for stype in support_types:
+    create_test_class(globals(), XPUTestClipOp, stype)
+
 if __name__ == '__main__':
     unittest.main()
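The three added lines before the main guard are the whole registration step: get_xpu_op_support_types queries which dtypes the XPU clip kernel supports, and create_test_class stamps out one concrete unittest class per dtype from the wrapper. As a rough, self-contained sketch of that mechanism (plain unittest plus type(); the names BaseCase and make_dtype_case are illustrative stand-ins, not Paddle helpers):

import unittest
import numpy as np


class BaseCase(unittest.TestCase):
    # Filled in per generated subclass, mirroring the in_type attribute
    # that the XPUOpTest subclasses read in init_dtype().
    in_type = 'float32'

    def test_clip(self):
        x = np.random.random((4, 10)).astype(self.in_type)
        # Cast the bounds to the test dtype so the comparison below is exact
        # even for low-precision types such as float16.
        lo = np.asarray(0.3, dtype=self.in_type)
        hi = np.asarray(0.8, dtype=self.in_type)
        out = np.clip(x, lo, hi)
        self.assertTrue(((out >= lo) & (out <= hi)).all())


def make_dtype_case(base, dtype):
    # Build a new test class for this dtype and register it in globals(),
    # which is roughly what create_test_class does for each supported type.
    name = '%sFor%s' % (base.__name__, dtype)
    globals()[name] = type(name, (base,), {'in_type': dtype})


# Stand-in for get_xpu_op_support_types('clip'); the real helper queries
# the XPU kernel registry for the supported dtype list.
for stype in ['float32', 'float16']:
    make_dtype_case(BaseCase, stype)

if __name__ == '__main__':
    unittest.main()

Generating classes at import time keeps a single definition per op while still giving unittest a separately discovered and reported case for every supported dtype.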
@@ -18,54 +18,78 @@ import unittest
 import numpy as np
 import sys
 sys.path.append("..")
-from op_test_xpu import XPUOpTest
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid.op import Operator
 import paddle
-from paddle.static import Program, program_guard
+import paddle.fluid as fluid
+from paddle.fluid import core
+from paddle.fluid import compiler, Program, program_guard
+import op_test
+from op_test import OpTest, skip_check_grad_ci
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
-class TestXPUScaleOp(XPUOpTest):
+
+class XPUTestScaleOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'scale'
+        self.use_dynamic_create_class = False
+
+    class TestScaleOp(XPUOpTest):
         def setUp(self):
+            self.init_dtype()
+            self.set_xpu()
             self.op_type = "scale"
-            self.init_type()
-            self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
-            self.attrs = {'scale': -2.3, 'use_xpu': True}
+            self.place = paddle.XPUPlace(0)
+            self.set_inputs()
+            self.set_attrs()
             self.outputs = {
                 'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
             }
 
-    def init_type(self):
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+            self.__class__.op_type = self.dtype
+
+        def set_inputs(self):
+            self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
+
+        def init_dtype(self):
+            if "float16" == self.in_type:
+                self.dtype = np.float16
+            if "float32" == self.in_type:
                 self.dtype = np.float32
+            if "int64" == self.in_type:
+                self.dtype = np.int64
+
+        def set_attrs(self):
+            self.attrs = {'scale': -2.3}
 
         def test_check_output(self):
             if paddle.is_compiled_with_xpu():
                 place = paddle.XPUPlace(0)
                 self.check_output_with_place(place)
 
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
-
-# class TestXPUScaleOpInt64(TestXPUScaleOp):
-#     def init_type(self):
-#         self.dtype = np.int64
-
-class TestScaleFp16Op(TestXPUScaleOp):
-    def init_dtype_type(self):
-        self.dtype = np.float16
-
-    def test_check_output(self):
-        place = core.XPUPlace(0)
-        self.check_output_with_place(place, atol=0.002)
-
-    def test_check_grad(self):
-        place = core.XPUPlace(0)
-        self.check_grad_with_place(place, ["X"], "Out", max_relative_error=0.05)
+    class TestScaleOp1(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': 3.5}
+
+    class TestScaleOp2(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': 6.77}
+
+    class TestScaleOp3(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': -9.19}
+
+    class TestScaleOp4(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': 0.0}
+
+    class TestScaleOp5(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': -0.003}
 
 class TestScaleApiStatic(unittest.TestCase):
@@ -108,5 +132,9 @@ class TestScaleInplaceApiDygraph(TestScaleApiDygraph):
         return x.scale_(scale, bias)
 
+
+support_types = get_xpu_op_support_types('scale')
+for stype in support_types:
+    create_test_class(globals(), XPUTestScaleOp, stype)
+
 if __name__ == "__main__":
     unittest.main()
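One detail worth noting in TestScaleOp: the expected output casts the scale scalar to the input dtype before multiplying ('Out': X * self.dtype(attrs['scale'])). A quick numpy-only check of why that cast matters once int64 joins the supported types (illustrative values; no XPU required):

import numpy as np

# Host-side reference for scale: Out = X * scale, with the scalar cast to
# the tensor dtype first, matching the test's expected-output expression.
x = np.random.randint(-5, 5, (10, 10)).astype(np.int64)
scale = -2.3

out_cast = x * np.int64(scale)  # np.int64(-2.3) truncates to -2; result stays int64
out_promoted = x * scale        # multiplying by a Python float promotes to float64

assert out_cast.dtype == np.int64
assert out_promoted.dtype == np.float64
assert (out_cast == x * -2).all()

Without the cast, the numpy reference would be a float64 tensor while the int64 kernel produces integers, and the output comparison would test the wrong contract.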
@@ -19,28 +19,49 @@ import numpy as np
 import sys
 sys.path.append("..")
 from op_test_xpu import OpTest, XPUOpTest
+from op_test import skip_check_grad_ci
 import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
+from paddle.fluid import compiler, Program, program_guard, core
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 from scipy.special import logit
 from scipy.special import expit
 
 paddle.enable_static()
 
-class TestSigmoidCrossEntropyWithLogitsOp1(XPUOpTest):
+
+class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper):
     """Test sigmoid_cross_entropy_with_logit_op with binary label
     """
+
+    def __init__(self):
+        self.op_name = "sigmoid_cross_entropy_with_logits"
+        self.use_dynamic_create_class = False
+
+    class TestSigmoidCrossEntropyWithLogitsOp(XPUOpTest):
         def setUp(self):
-            self.op_type = "sigmoid_cross_entropy_with_logits"
             self.set_xpu()
+            self.op_type = "sigmoid_cross_entropy_with_logits"
+            self.place = paddle.XPUPlace(0)
             self.init_dtype()
+            self.set_inputs()
+            self.set_output()
+
+        def set_output(self):
+            # Fw Pass is implemented as elementwise sigmoid followed by
+            # elementwise logistic loss
+            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+            sigmoid_X = expit(self.inputs['X'])
+            term1 = self.inputs['Label'] * np.log(sigmoid_X)
+            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+            self.outputs = {'Out': -term1 - term2}
+
+        def set_inputs(self):
             batch_size = 64
             num_classes = 20
             self.inputs = {
@@ -50,14 +71,7 @@ class TestSigmoidCrossEntropyWithLogitsOp1(XPUOpTest):
                 'Label': np.random.randint(0, 2, (batch_size, num_classes))
                 .astype(self.dtype)
             }
-            self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
-            # Fw Pass is implemented as elementwise sigmoid followed by
-            # elementwise logistic loss
-            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-            sigmoid_X = expit(self.inputs['X'])
-            term1 = self.inputs['Label'] * np.log(sigmoid_X)
-            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-            self.outputs = {'Out': -term1 - term2}
 
         def test_check_output(self):
             self.check_output_with_place(self.place)
@@ -67,25 +81,22 @@ class TestSigmoidCrossEntropyWithLogitsOp1(XPUOpTest):
         def set_xpu(self):
             self.__class__.use_xpu = True
+            self.__class__.op_type = self.in_type
             self.place = paddle.XPUPlace(0)
 
         def init_dtype(self):
-            self.dtype = np.float32
+            self.dtype = self.in_type
 
     class TestSigmoidCrossEntropyWithLogitsOp2(
-        TestSigmoidCrossEntropyWithLogitsOp1):
+            TestSigmoidCrossEntropyWithLogitsOp):
         """Test sigmoid_cross_entropy_with_logit_op with probabilistic label
         """
 
-        def setUp(self):
-            self.op_type = "sigmoid_cross_entropy_with_logits"
-            self.set_xpu()
-            self.init_dtype()
+        def set_inputs(self):
             batch_size = 64
             num_classes = 20
             ignore_index = -1
+            self.ignore_index = ignore_index
             self.inputs = {
                 'X': logit(
                     np.random.uniform(0, 1, (batch_size, num_classes))
@@ -93,8 +104,9 @@ class TestSigmoidCrossEntropyWithLogitsOp2(
                 'Label': np.random.randint(-1, 2, (batch_size, num_classes))
                 .astype(self.dtype)
             }
-            self.attrs = {'ignore_index': ignore_index, }
+            self.attrs = {'ignore_index': ignore_index}
 
+        def set_output(self):
             # Fw Pass is implemented as elementwise sigmoid followed by
             # elementwise logistic loss
             # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
@@ -102,20 +114,15 @@ class TestSigmoidCrossEntropyWithLogitsOp2(
             term1 = self.inputs['Label'] * np.log(sigmoid_X)
             term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
             out = -term1 - term2
-            out[np.where(self.inputs['Label'] == ignore_index)] = 0
+            out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
             self.outputs = {'Out': out}
 
     class TestSigmoidCrossEntropyWithLogitsOp3(
-        TestSigmoidCrossEntropyWithLogitsOp1):
+            TestSigmoidCrossEntropyWithLogitsOp):
         """Test sigmoid_cross_entropy_with_logit_op with probabilistic label
         """
 
-        def setUp(self):
-            self.op_type = "sigmoid_cross_entropy_with_logits"
-            self.set_xpu()
-            self.init_dtype()
+        def set_inputs(self):
             batch_size = 64
             num_classes = 20
             self.inputs = {
@@ -125,7 +132,9 @@ class TestSigmoidCrossEntropyWithLogitsOp3(
                 'Label': np.random.uniform(0, 1, (batch_size, num_classes))
                 .astype(self.dtype)
             }
-            self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
 
+        def set_output(self):
             # Fw Pass is implemented as elementwise sigmoid followed by
             # elementwise logistic loss
             # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
@@ -134,20 +143,16 @@ class TestSigmoidCrossEntropyWithLogitsOp3(
             term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
             self.outputs = {'Out': -term1 - term2}
 
     class TestSigmoidCrossEntropyWithLogitsOp4(
-        TestSigmoidCrossEntropyWithLogitsOp1):
+            TestSigmoidCrossEntropyWithLogitsOp):
         """Test sigmoid_cross_entropy_with_logit_op with probabilistic label
         """
 
-        def setUp(self):
-            self.op_type = "sigmoid_cross_entropy_with_logits"
-            self.set_xpu()
-            self.init_dtype()
+        def set_inputs(self):
             batch_size = 64
             num_classes = 20
             ignore_index = -1
+            self.ignore_index = ignore_index
             self.inputs = {
                 'X': logit(
                     np.random.uniform(0, 1, (batch_size, num_classes))
@@ -157,6 +162,7 @@ class TestSigmoidCrossEntropyWithLogitsOp4(
             }
             self.attrs = {'ignore_index': ignore_index, 'normalize': True}
 
+        def set_output(self):
             # Fw Pass is implemented as elementwise sigmoid followed by
             # elementwise logistic loss
             # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
@@ -164,33 +170,31 @@ class TestSigmoidCrossEntropyWithLogitsOp4(
             term1 = self.inputs['Label'] * np.log(sigmoid_X)
             term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
             out = -term1 - term2
-            out[np.where(self.inputs['Label'] == ignore_index)] = 0
+            out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
             if self.attrs['normalize']:
                 out = out / float(
-                    np.where(self.inputs['Label'] != ignore_index)[0].size)
+                    np.where(self.inputs['Label'] != self.ignore_index)[0].size)
             self.outputs = {'Out': out}
 
     class TestSigmoidCrossEntropyWithLogitsOp5(
-        TestSigmoidCrossEntropyWithLogitsOp1):
+            TestSigmoidCrossEntropyWithLogitsOp):
         """Test sigmoid_cross_entropy_with_logit_op with probabilistic label
         """
 
-        def setUp(self):
-            self.op_type = "sigmoid_cross_entropy_with_logits"
-            self.set_xpu()
-            self.init_dtype()
+        def set_inputs(self):
             batch_size = [10, 10]
             num_classes = 20
             self.inputs = {
                 'X': logit(
                     np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
                     .astype(self.dtype)),
-                'Label': np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+                'Label':
+                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
                 .astype(self.dtype)
             }
-            self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
 
+        def set_output(self):
             # Fw Pass is implemented as elementwise sigmoid followed by
             # elementwise logistic loss
             # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
@@ -199,71 +203,71 @@ class TestSigmoidCrossEntropyWithLogitsOp5(
             term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
             self.outputs = {'Out': -term1 - term2}
 
-class TestSigmoidCrossEntropyWithLogitsNorm(
-        TestSigmoidCrossEntropyWithLogitsOp1):
-    """Test sigmoid_cross_entropy_with_logit_op with probabilistic label
+    class TestSigmoidCrossEntropyWithLogitsOp6(
+            TestSigmoidCrossEntropyWithLogitsOp):
+        """Test sigmoid_cross_entropy_with_logit_op with binary label
         """
 
-        def setUp(self):
-            self.op_type = "sigmoid_cross_entropy_with_logits"
-            self.set_xpu()
-            self.init_dtype()
+        def set_inputs(self):
             batch_size = [10, 10]
             num_classes = 20
-            ignore_index = -1
             self.inputs = {
                 'X': logit(
                     np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
                     .astype(self.dtype)),
-                'Label': np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
+                'Label':
+                np.random.randint(0, 2, tuple(batch_size + [num_classes]))
                 .astype(self.dtype)
             }
-            self.attrs = {'ignore_index': ignore_index, 'normalize': True}
+            self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
 
+        def set_output(self):
             # Fw Pass is implemented as elementwise sigmoid followed by
             # elementwise logistic loss
             # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
             sigmoid_X = expit(self.inputs['X'])
             term1 = self.inputs['Label'] * np.log(sigmoid_X)
             term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-            out = -term1 - term2
-            out[np.where(self.inputs['Label'] == ignore_index)] = 0
-            if self.attrs['normalize']:
-                out = out / float(
-                    np.where(self.inputs['Label'] != ignore_index)[0].size)
-            self.outputs = {'Out': out}
+            self.outputs = {'Out': -term1 - term2}
 
-class TestSigmoidCrossEntropyWithLogitsOp6(
-        TestSigmoidCrossEntropyWithLogitsOp1):
-    """Test sigmoid_cross_entropy_with_logit_op with binary label
+    class TestSigmoidCrossEntropyWithLogitsNorm(
+            TestSigmoidCrossEntropyWithLogitsOp):
+        """Test sigmoid_cross_entropy_with_logit_op with probabilistic label
         """
 
-        def setUp(self):
-            self.op_type = "sigmoid_cross_entropy_with_logits"
-            self.set_xpu()
-            self.init_dtype()
+        def set_inputs(self):
             batch_size = [10, 10]
             num_classes = 20
+            ignore_index = -1
+            self.ignore_index = ignore_index
             self.inputs = {
                 'X': logit(
                     np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
                     .astype(self.dtype)),
-                'Label': np.random.randint(0, 2, tuple(batch_size + [num_classes]))
+                'Label':
+                np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
                 .astype(self.dtype)
             }
+            self.attrs = {'ignore_index': ignore_index, 'normalize': True}
 
+        def set_output(self):
             # Fw Pass is implemented as elementwise sigmoid followed by
             # elementwise logistic loss
             # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
             sigmoid_X = expit(self.inputs['X'])
             term1 = self.inputs['Label'] * np.log(sigmoid_X)
             term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-            self.outputs = {'Out': -term1 - term2}
+            out = -term1 - term2
+            out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
+            if self.attrs['normalize']:
+                out = out / float(
+                    np.where(self.inputs['Label'] != self.ignore_index)[0].size)
+            self.outputs = {'Out': out}
 
+
+support_types = get_xpu_op_support_types('sigmoid_cross_entropy_with_logits')
+for stype in support_types:
+    create_test_class(globals(), XPUTestSigmoidCrossEntropyWithLogitsOp, stype)
+
 if __name__ == '__main__':
     unittest.main()
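All of the set_output helpers compute the same host-side reference loss; the subclasses only vary the label distribution, the ignore_index masking, and normalization. A standalone numpy/scipy paraphrase of the fullest variant, the TestSigmoidCrossEntropyWithLogitsNorm path (shapes and the float32 dtype here are illustrative; this mirrors the test's reference computation, not Paddle's kernel):

import numpy as np
from scipy.special import logit, expit

ignore_index = -1
x = logit(np.random.uniform(0, 1, (10, 20)).astype(np.float32))
label = np.random.randint(-1, 2, (10, 20)).astype(np.float32)

# Label * -log(sigmoid(X)) + (1 - Label) * -log(1 - sigmoid(X))
sigmoid_x = expit(x)
out = -label * np.log(sigmoid_x) - (1 - label) * np.log(1 - sigmoid_x)
out[label == ignore_index] = 0                    # zero the ignored entries
out = out / float((label != ignore_index).sum())  # the 'normalize': True path

Stashing ignore_index on self (self.ignore_index) is what lets the refactor split the old monolithic setUp into set_inputs and set_output: the mask value is chosen when inputs are built but consumed later when the expected output is computed.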