Unverified commit e07420b9, authored by z8hanghuan, committed by GitHub

new way of test cases, *test=kunlun (#39444)

* new way of test cases, *test=kunlun

* new way of test cases, *test=kunlun

* new way of test cases, *test=kunlun

Parent d0df5632
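The pattern shared by all three files below: each op's test cases move into an XPUOpTestWrapper subclass, and a registration loop stamps out one concrete unittest class per dtype the Kunlun kernel reports as supported, so dtype coverage no longer has to be hand-written per class. A minimal sketch of the mechanism, using the same helpers the diffs import; the 'sign' op, shapes, and values are placeholders for illustration, and the snippet assumes it runs inside Paddle's XPU unittest tree so op_test_xpu and xpu.get_test_cover_info resolve:

import sys
sys.path.append("..")  # the XPU test helpers live one directory up in Paddle's test tree

import unittest
import numpy as np
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


class XPUTestSignOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'sign'                  # kernel whose dtype support list is queried
        self.use_dynamic_create_class = False  # inner test classes are declared statically

    class TestSignOp(XPUOpTest):
        def setUp(self):
            self.__class__.use_xpu = True      # mirrors the set_xpu() helpers in the diffs
            self.op_type = 'sign'
            self.place = paddle.XPUPlace(0)
            self.dtype = self.in_type          # in_type is injected per generated class
            x = np.random.random((4, 10)).astype(self.dtype)
            self.inputs = {'X': x}
            self.outputs = {'Out': np.sign(x)}

        def test_check_output(self):
            self.check_output_with_place(self.place)


# one concrete test class is registered in globals() per supported dtype
support_types = get_xpu_op_support_types('sign')
for stype in support_types:
    create_test_class(globals(), XPUTestSignOp, stype)

if __name__ == '__main__':
    unittest.main()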
@@ -24,92 +24,103 @@ from op_test_xpu import OpTest, XPUOpTest
 import paddle
 from paddle.fluid import Program, program_guard
+import op_test
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
-class TestClipOp(XPUOpTest):
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-        self.place = paddle.XPUPlace(0)
-
-    def setUp(self):
-        self.set_xpu()
-        self.max_relative_error = 0.006
-        self.inputs = {}
-        self.initTestCase()
-
-        self.op_type = "clip"
-        self.attrs = {}
-        self.attrs['min'] = self.min
-        self.attrs['max'] = self.max
-        if 'Min' in self.inputs:
-            min_v = self.inputs['Min']
-        else:
-            min_v = self.attrs['min']
-
-        if 'Max' in self.inputs:
-            max_v = self.inputs['Max']
-        else:
-            max_v = self.attrs['max']
-
-        input = np.random.random(self.shape).astype("float32")
-        input[np.abs(input - min_v) < self.max_relative_error] = 0.5
-        input[np.abs(input - max_v) < self.max_relative_error] = 0.5
-        self.inputs['X'] = input
-        self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}
-
-    def test_check_output(self):
-        paddle.enable_static()
-        self.check_output_with_place(self.place)
-        paddle.disable_static()
-
-    def test_check_grad_normal(self):
-        paddle.enable_static()
-        self.check_grad_with_place(self.place, ['X'], 'Out')
-        paddle.disable_static()
-
-    def initTestCase(self):
-        self.shape = (4, 10, 10)
-        self.max = 0.8
-        self.min = 0.3
-        self.inputs['Max'] = np.array([0.8]).astype('float32')
-        self.inputs['Min'] = np.array([0.1]).astype('float32')
-
-
-class TestCase1(TestClipOp):
-    def initTestCase(self):
-        self.shape = (8, 16, 8)
-        self.max = 0.7
-        self.min = 0.0
-
-
-class TestCase2(TestClipOp):
-    def initTestCase(self):
-        self.shape = (8, 16)
-        self.max = 1.0
-        self.min = 0.0
-
-
-class TestCase3(TestClipOp):
-    def initTestCase(self):
-        self.shape = (4, 8, 16)
-        self.max = 0.7
-        self.min = 0.2
-
-
-class TestCase4(TestClipOp):
-    def initTestCase(self):
-        self.shape = (4, 8, 8)
-        self.max = 0.7
-        self.min = 0.2
-        self.inputs['Max'] = np.array([0.8]).astype('float32')
-        self.inputs['Min'] = np.array([0.3]).astype('float32')
-
-
-class TestCase5(TestClipOp):
-    def initTestCase(self):
-        self.shape = (4, 8, 16)
-        self.max = 0.5
-        self.min = 0.5
+
+class XPUTestClipOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'clip'
+        self.use_dynamic_create_class = False
+
+    class TestClipOp(XPUOpTest):
+        def setUp(self):
+            self.init_dtype()
+            self.set_xpu()
+            self.op_type = "clip"
+            self.place = paddle.XPUPlace(0)
+            self.inputs = {}
+            self.init_data()
+            self.set_attrs()
+            self.set_inputs()
+            self.outputs = {
+                'Out': np.clip(self.inputs['X'], self.min_v, self.max_v)
+            }
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+            self.__class__.op_type = self.dtype
+
+        def init_data(self):
+            self.shape = (4, 10, 10)
+            self.max = 0.8
+            self.min = 0.3
+
+        def set_inputs(self):
+            if 'Min' in self.inputs:
+                min_v = self.inputs['Min']
+            else:
+                min_v = self.attrs['min']
+
+            if 'Max' in self.inputs:
+                max_v = self.inputs['Max']
+            else:
+                max_v = self.attrs['max']
+
+            self.min_v = min_v
+            self.max_v = max_v
+            self.max_relative_error = 0.006
+            input = np.random.random(self.shape).astype("float32")
+            input[np.abs(input - min_v) < self.max_relative_error] = 0.5
+            input[np.abs(input - max_v) < self.max_relative_error] = 0.5
+            self.inputs['X'] = input
+
+        def set_attrs(self):
+            self.attrs = {}
+            self.attrs['min'] = self.min
+            self.attrs['max'] = self.max
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def test_check_output(self):
+            paddle.enable_static()
+            self.check_output_with_place(self.place)
+            paddle.disable_static()
+
+    class TestClipOp1(TestClipOp):
+        def init_data(self):
+            self.shape = (8, 16, 8)
+            self.max = 0.7
+            self.min = 0.0
+
+    class TestClipOp2(TestClipOp):
+        def init_data(self):
+            self.shape = (8, 16)
+            self.max = 1.0
+            self.min = 0.0
+
+    class TestClipOp3(TestClipOp):
+        def init_data(self):
+            self.shape = (4, 8, 16)
+            self.max = 0.7
+            self.min = 0.2
+
+    class TestClipOp4(TestClipOp):
+        def init_data(self):
+            self.shape = (4, 8, 8)
+            self.max = 0.7
+            self.min = 0.2
+            self.inputs['Max'] = np.array([0.8]).astype('float32')
+            self.inputs['Min'] = np.array([0.3]).astype('float32')
+
+    class TestClipOp5(TestClipOp):
+        def init_data(self):
+            self.shape = (4, 8, 16)
+            self.max = 0.5
+            self.min = 0.5
 
 class TestClipOpError(unittest.TestCase):
@@ -212,5 +223,9 @@ class TestInplaceClipAPI(TestClipAPI):
         return x.clip_(min, max)
 
+
+support_types = get_xpu_op_support_types('clip')
+for stype in support_types:
+    create_test_class(globals(), XPUTestClipOp, stype)
+
 if __name__ == '__main__':
     unittest.main()
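One detail worth calling out in the clip rewrite: set_inputs resets any element that falls within max_relative_error of a clip boundary to 0.5. Elements sitting essentially on a boundary make the reference np.clip output flip between clipped and unclipped under tiny numeric differences between devices. A rough standalone illustration of the effect being avoided (the values here are made up):

import numpy as np

min_v, max_v, eps = 0.3, 0.8, 0.006
x = np.array([0.2995, 0.5, 0.8003])     # first and last sit within eps of a boundary

# a tiny perturbation flips boundary elements between clipped and unclipped
print(np.clip(x, min_v, max_v))         # [0.3    0.5    0.8   ]
print(np.clip(x + 1e-3, min_v, max_v))  # [0.3005 0.501  0.8   ]

# the test therefore pins near-boundary elements to a safe interior value
x[np.abs(x - min_v) < eps] = 0.5
x[np.abs(x - max_v) < eps] = 0.5
print(x)                                # [0.5 0.5 0.5]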
@@ -18,54 +18,78 @@ import unittest
 import numpy as np
 import sys
 sys.path.append("..")
-from op_test_xpu import XPUOpTest
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid.op import Operator
-import paddle
-from paddle.static import Program, program_guard
-
-
-class TestXPUScaleOp(XPUOpTest):
-    def setUp(self):
-        self.op_type = "scale"
-        self.init_type()
-        self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
-        self.attrs = {'scale': -2.3, 'use_xpu': True}
-        self.outputs = {
-            'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
-        }
-
-    def init_type(self):
-        self.dtype = np.float32
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
-
-
-# class TestXPUScaleOpInt64(TestXPUScaleOp):
-#     def init_type(self):
-#         self.dtype = np.int64
-
-
-class TestScaleFp16Op(TestXPUScaleOp):
-    def init_dtype_type(self):
-        self.dtype = np.float16
-
-    def test_check_output(self):
-        place = core.XPUPlace(0)
-        self.check_output_with_place(place, atol=0.002)
-
-    def test_check_grad(self):
-        place = core.XPUPlace(0)
-        self.check_grad_with_place(place, ["X"], "Out", max_relative_error=0.05)
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid import core
+from paddle.fluid import compiler, Program, program_guard
+import op_test
+from op_test import OpTest, skip_check_grad_ci
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+
+class XPUTestScaleOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'scale'
+        self.use_dynamic_create_class = False
+
+    class TestScaleOp(XPUOpTest):
+        def setUp(self):
+            self.init_dtype()
+            self.set_xpu()
+            self.op_type = "scale"
+            self.place = paddle.XPUPlace(0)
+            self.set_inputs()
+            self.set_attrs()
+            self.outputs = {
+                'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
+            }
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+            self.__class__.op_type = self.dtype
+
+        def set_inputs(self):
+            self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
+
+        def init_dtype(self):
+            if "float16" == self.in_type:
+                self.dtype = np.float16
+            if "float32" == self.in_type:
+                self.dtype = np.float32
+            if "int64" == self.in_type:
+                self.dtype = np.int64
+
+        def set_attrs(self):
+            self.attrs = {'scale': -2.3}
+
+        def test_check_output(self):
+            if paddle.is_compiled_with_xpu():
+                place = paddle.XPUPlace(0)
+                self.check_output_with_place(place)
+
+    class TestScaleOp1(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': 3.5}
+
+    class TestScaleOp2(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': 6.77}
+
+    class TestScaleOp3(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': -9.19}
+
+    class TestScaleOp4(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': 0.0}
+
+    class TestScaleOp5(TestScaleOp):
+        def set_attrs(self):
+            self.attrs = {'scale': -0.003}
 
 class TestScaleApiStatic(unittest.TestCase):
@@ -108,5 +132,9 @@ class TestScaleInplaceApiDygraph(TestScaleApiDygraph):
         return x.scale_(scale, bias)
 
+
+support_types = get_xpu_op_support_types('scale')
+for stype in support_types:
+    create_test_class(globals(), XPUTestScaleOp, stype)
+
 if __name__ == "__main__":
     unittest.main()
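Note that the scale tests build the reference output as X * self.dtype(self.attrs['scale']) rather than X * scale: the attribute is cast to the tensor dtype first, so for the int64 branch of init_dtype the reference is an integer multiply by the truncated scale rather than a float64 promotion. A small standalone illustration of the difference the cast makes (not part of the test file):

import numpy as np

scale = -2.3
x = np.arange(4, dtype=np.int64)   # [0 1 2 3]

# multiplying by the raw float promotes the result to float64 ...
print(x * scale)                   # [-0.  -2.3 -4.6 -6.9]

# ... while casting the scale to the tensor dtype first truncates it,
# np.int64(-2.3) == -2, keeping the reference output an int64 tensor
print(x * np.int64(scale))         # [ 0 -2 -4 -6]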
@@ -19,251 +19,255 @@ import numpy as np
 import sys
 sys.path.append("..")
 from op_test_xpu import OpTest, XPUOpTest
-from op_test import skip_check_grad_ci
 import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
+from paddle.fluid import compiler, Program, program_guard, core
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 from scipy.special import logit
 from scipy.special import expit
 
 paddle.enable_static()
 
-class TestSigmoidCrossEntropyWithLogitsOp1(XPUOpTest):
-    """Test sigmoid_cross_entropy_with_logit_op with binary label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        self.set_xpu()
-        self.init_dtype()
-
-        batch_size = 64
-        num_classes = 20
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, (batch_size, num_classes))
-                .astype(self.dtype)),
-            'Label': np.random.randint(0, 2, (batch_size, num_classes))
-            .astype(self.dtype)
-        }
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        self.outputs = {'Out': -term1 - term2}
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad(self):
-        self.check_grad_with_place(self.place, ['X'], 'Out')
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-        self.place = paddle.XPUPlace(0)
-
-    def init_dtype(self):
-        self.dtype = np.float32
-
-
-class TestSigmoidCrossEntropyWithLogitsOp2(
-        TestSigmoidCrossEntropyWithLogitsOp1):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        self.set_xpu()
-        self.init_dtype()
-
-        batch_size = 64
-        num_classes = 20
-        ignore_index = -1
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, (batch_size, num_classes))
-                .astype(self.dtype)),
-            'Label': np.random.randint(-1, 2, (batch_size, num_classes))
-            .astype(self.dtype)
-        }
-        self.attrs = {'ignore_index': ignore_index, }
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        out = -term1 - term2
-        out[np.where(self.inputs['Label'] == ignore_index)] = 0
-        self.outputs = {'Out': out}
-
-
-class TestSigmoidCrossEntropyWithLogitsOp3(
-        TestSigmoidCrossEntropyWithLogitsOp1):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        self.set_xpu()
-        self.init_dtype()
-
-        batch_size = 64
-        num_classes = 20
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, (batch_size, num_classes))
-                .astype(self.dtype)),
-            'Label': np.random.uniform(0, 1, (batch_size, num_classes))
-            .astype(self.dtype)
-        }
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        self.outputs = {'Out': -term1 - term2}
-
-
-class TestSigmoidCrossEntropyWithLogitsOp4(
-        TestSigmoidCrossEntropyWithLogitsOp1):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        self.set_xpu()
-        self.init_dtype()
-
-        batch_size = 64
-        num_classes = 20
-        ignore_index = -1
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, (batch_size, num_classes))
-                .astype(self.dtype)),
-            'Label': np.random.randint(-1, 2, (batch_size, num_classes))
-            .astype(self.dtype)
-        }
-        self.attrs = {'ignore_index': ignore_index, 'normalize': True}
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        out = -term1 - term2
-        out[np.where(self.inputs['Label'] == ignore_index)] = 0
-        if self.attrs['normalize']:
-            out = out / float(
-                np.where(self.inputs['Label'] != ignore_index)[0].size)
-        self.outputs = {'Out': out}
-
-
-class TestSigmoidCrossEntropyWithLogitsOp5(
-        TestSigmoidCrossEntropyWithLogitsOp1):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        self.set_xpu()
-        self.init_dtype()
-
-        batch_size = [10, 10]
-        num_classes = 20
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
-                .astype(self.dtype)),
-            'Label': np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
-            .astype(self.dtype)
-        }
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        self.outputs = {'Out': -term1 - term2}
-
-
-class TestSigmoidCrossEntropyWithLogitsNorm(
-        TestSigmoidCrossEntropyWithLogitsOp1):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        self.set_xpu()
-        self.init_dtype()
-
-        batch_size = [10, 10]
-        num_classes = 20
-        ignore_index = -1
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
-                .astype(self.dtype)),
-            'Label': np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
-            .astype(self.dtype)
-        }
-        self.attrs = {'ignore_index': ignore_index, 'normalize': True}
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        out = -term1 - term2
-        out[np.where(self.inputs['Label'] == ignore_index)] = 0
-        if self.attrs['normalize']:
-            out = out / float(
-                np.where(self.inputs['Label'] != ignore_index)[0].size)
-        self.outputs = {'Out': out}
-
-
-class TestSigmoidCrossEntropyWithLogitsOp6(
-        TestSigmoidCrossEntropyWithLogitsOp1):
-    """Test sigmoid_cross_entropy_with_logit_op with binary label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        self.set_xpu()
-        self.init_dtype()
-
-        batch_size = [10, 10]
-        num_classes = 20
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
-                .astype(self.dtype)),
-            'Label': np.random.randint(0, 2, tuple(batch_size + [num_classes]))
-            .astype(self.dtype)
-        }
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        self.outputs = {'Out': -term1 - term2}
+class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper):
+    """Test sigmoid_cross_entropy_with_logit_op with binary label
+    """
+
+    def __init__(self):
+        self.op_name = "sigmoid_cross_entropy_with_logits"
+        self.use_dynamic_create_class = False
+
+    class TestSigmoidCrossEntropyWithLogitsOp(XPUOpTest):
+        def setUp(self):
+            self.set_xpu()
+            self.op_type = "sigmoid_cross_entropy_with_logits"
+            self.place = paddle.XPUPlace(0)
+            self.init_dtype()
+            self.set_inputs()
+            self.set_output()
+
+        def set_output(self):
+            # Fw Pass is implemented as elementwise sigmoid followed by
+            # elementwise logistic loss
+            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+            sigmoid_X = expit(self.inputs['X'])
+            term1 = self.inputs['Label'] * np.log(sigmoid_X)
+            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+            self.outputs = {'Out': -term1 - term2}
+
+        def set_inputs(self):
+            batch_size = 64
+            num_classes = 20
+            self.inputs = {
+                'X': logit(
+                    np.random.uniform(0, 1, (batch_size, num_classes))
+                    .astype(self.dtype)),
+                'Label': np.random.randint(0, 2, (batch_size, num_classes))
+                .astype(self.dtype)
+            }
+            self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def test_check_grad(self):
+            self.check_grad_with_place(self.place, ['X'], 'Out')
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.op_type = self.in_type
+            self.place = paddle.XPUPlace(0)
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+    class TestSigmoidCrossEntropyWithLogitsOp2(
+            TestSigmoidCrossEntropyWithLogitsOp):
+        """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
+        """
+
+        def set_inputs(self):
+            batch_size = 64
+            num_classes = 20
+            ignore_index = -1
+            self.ignore_index = ignore_index
+            self.inputs = {
+                'X': logit(
+                    np.random.uniform(0, 1, (batch_size, num_classes))
+                    .astype(self.dtype)),
+                'Label': np.random.randint(-1, 2, (batch_size, num_classes))
+                .astype(self.dtype)
+            }
+            self.attrs = {'ignore_index': ignore_index}
+
+        def set_output(self):
+            # Fw Pass is implemented as elementwise sigmoid followed by
+            # elementwise logistic loss
+            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+            sigmoid_X = expit(self.inputs['X'])
+            term1 = self.inputs['Label'] * np.log(sigmoid_X)
+            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+            out = -term1 - term2
+            out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
+            self.outputs = {'Out': out}
+
+    class TestSigmoidCrossEntropyWithLogitsOp3(
+            TestSigmoidCrossEntropyWithLogitsOp):
+        """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
+        """
+
+        def set_inputs(self):
+            batch_size = 64
+            num_classes = 20
+            self.inputs = {
+                'X': logit(
+                    np.random.uniform(0, 1, (batch_size, num_classes))
+                    .astype(self.dtype)),
+                'Label': np.random.uniform(0, 1, (batch_size, num_classes))
+                .astype(self.dtype)
+            }
+            self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
+
+        def set_output(self):
+            # Fw Pass is implemented as elementwise sigmoid followed by
+            # elementwise logistic loss
+            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+            sigmoid_X = expit(self.inputs['X'])
+            term1 = self.inputs['Label'] * np.log(sigmoid_X)
+            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+            self.outputs = {'Out': -term1 - term2}
+
+    class TestSigmoidCrossEntropyWithLogitsOp4(
+            TestSigmoidCrossEntropyWithLogitsOp):
+        """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
+        """
+
+        def set_inputs(self):
+            batch_size = 64
+            num_classes = 20
+            ignore_index = -1
+            self.ignore_index = ignore_index
+            self.inputs = {
+                'X': logit(
+                    np.random.uniform(0, 1, (batch_size, num_classes))
+                    .astype(self.dtype)),
+                'Label': np.random.randint(-1, 2, (batch_size, num_classes))
+                .astype(self.dtype)
+            }
+            self.attrs = {'ignore_index': ignore_index, 'normalize': True}
+
+        def set_output(self):
+            # Fw Pass is implemented as elementwise sigmoid followed by
+            # elementwise logistic loss
+            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+            sigmoid_X = expit(self.inputs['X'])
+            term1 = self.inputs['Label'] * np.log(sigmoid_X)
+            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+            out = -term1 - term2
+            out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
+            if self.attrs['normalize']:
+                out = out / float(
+                    np.where(self.inputs['Label'] != self.ignore_index)[0].size)
+            self.outputs = {'Out': out}
+
+    class TestSigmoidCrossEntropyWithLogitsOp5(
+            TestSigmoidCrossEntropyWithLogitsOp):
+        """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
+        """
+
+        def set_inputs(self):
+            batch_size = [10, 10]
+            num_classes = 20
+            self.inputs = {
+                'X': logit(
+                    np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+                    .astype(self.dtype)),
+                'Label':
+                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+                .astype(self.dtype)
+            }
+            self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
+
+        def set_output(self):
+            # Fw Pass is implemented as elementwise sigmoid followed by
+            # elementwise logistic loss
+            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+            sigmoid_X = expit(self.inputs['X'])
+            term1 = self.inputs['Label'] * np.log(sigmoid_X)
+            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+            self.outputs = {'Out': -term1 - term2}
+
+    class TestSigmoidCrossEntropyWithLogitsOp6(
+            TestSigmoidCrossEntropyWithLogitsOp):
+        """Test sigmoid_cross_entropy_with_logit_op with binary label
+        """
+
+        def set_inputs(self):
+            batch_size = [10, 10]
+            num_classes = 20
+            self.inputs = {
+                'X': logit(
+                    np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+                    .astype(self.dtype)),
+                'Label':
+                np.random.randint(0, 2, tuple(batch_size + [num_classes]))
+                .astype(self.dtype)
+            }
+            self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
+
+        def set_output(self):
+            # Fw Pass is implemented as elementwise sigmoid followed by
+            # elementwise logistic loss
+            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+            sigmoid_X = expit(self.inputs['X'])
+            term1 = self.inputs['Label'] * np.log(sigmoid_X)
+            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+            self.outputs = {'Out': -term1 - term2}
+
+    class TestSigmoidCrossEntropyWithLogitsNorm(
+            TestSigmoidCrossEntropyWithLogitsOp):
+        """Test sigmoid_cross_entropy_with_logit_op with probabalistic label
+        """
+
+        def set_inputs(self):
+            batch_size = [10, 10]
+            num_classes = 20
+            ignore_index = -1
+            self.ignore_index = ignore_index
+            self.inputs = {
+                'X': logit(
+                    np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+                    .astype(self.dtype)),
+                'Label':
+                np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
+                .astype(self.dtype)
+            }
+            self.attrs = {'ignore_index': ignore_index, 'normalize': True}
+
+        def set_output(self):
+            # Fw Pass is implemented as elementwise sigmoid followed by
+            # elementwise logistic loss
+            # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+            sigmoid_X = expit(self.inputs['X'])
+            term1 = self.inputs['Label'] * np.log(sigmoid_X)
+            term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+            out = -term1 - term2
+            out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
+            if self.attrs['normalize']:
+                out = out / float(
+                    np.where(self.inputs['Label'] != self.ignore_index)[0].size)
+            self.outputs = {'Out': out}
+
+
+support_types = get_xpu_op_support_types('sigmoid_cross_entropy_with_logits')
+for stype in support_types:
+    create_test_class(globals(), XPUTestSigmoidCrossEntropyWithLogitsOp, stype)
 
 if __name__ == '__main__':
     unittest.main()
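For reference, every set_output above computes the same elementwise loss, Label * -log(sigmoid(X)) + (1 - Label) * -log(1 - sigmoid(X)), optionally zeroing ignored positions and normalizing by the count of non-ignored ones. A condensed standalone version of that reference computation (same math as the test bodies; the shapes and the helper name are arbitrary):

import numpy as np
from scipy.special import expit, logit


def sigmoid_ce_reference(x, label, ignore_index=None, normalize=False):
    # Label * -log(sigmoid(X)) + (1 - Label) * -log(1 - sigmoid(X))
    sig = expit(x)
    out = -label * np.log(sig) - (1 - label) * np.log(1 - sig)
    if ignore_index is not None:
        out[label == ignore_index] = 0
        if normalize:
            # divide by the number of positions that are not ignored
            out /= float((label != ignore_index).sum())
    return out


x = logit(np.random.uniform(0.1, 0.9, (4, 5)).astype('float32'))
label = np.random.randint(-1, 2, (4, 5)).astype('float32')
print(sigmoid_ce_reference(x, label, ignore_index=-1, normalize=True))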