Unverified commit e07420b9, authored by z8hanghuan and committed by GitHub

new way of test cases, *test=kunlun (#39444)

* new way of test cases, *test=kunlun

* new way of test cases, *test=kunlun

* new way of test cases, *test=kunlun
Parent: d0df5632
@@ -24,92 +24,103 @@ from op_test_xpu import OpTest, XPUOpTest
import paddle
from paddle.fluid import Program, program_guard
class TestClipOp(XPUOpTest):
def set_xpu(self):
self.__class__.use_xpu = True
self.place = paddle.XPUPlace(0)
def setUp(self):
self.set_xpu()
self.max_relative_error = 0.006
self.inputs = {}
self.initTestCase()
self.op_type = "clip"
self.attrs = {}
self.attrs['min'] = self.min
self.attrs['max'] = self.max
if 'Min' in self.inputs:
min_v = self.inputs['Min']
else:
min_v = self.attrs['min']
if 'Max' in self.inputs:
max_v = self.inputs['Max']
else:
max_v = self.attrs['max']
input = np.random.random(self.shape).astype("float32")
input[np.abs(input - min_v) < self.max_relative_error] = 0.5
input[np.abs(input - max_v) < self.max_relative_error] = 0.5
self.inputs['X'] = input
self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}
def test_check_output(self):
paddle.enable_static()
self.check_output_with_place(self.place)
paddle.disable_static()
def test_check_grad_normal(self):
paddle.enable_static()
self.check_grad_with_place(self.place, ['X'], 'Out')
paddle.disable_static()
def initTestCase(self):
self.shape = (4, 10, 10)
self.max = 0.8
self.min = 0.3
self.inputs['Max'] = np.array([0.8]).astype('float32')
self.inputs['Min'] = np.array([0.1]).astype('float32')
class TestCase1(TestClipOp):
def initTestCase(self):
self.shape = (8, 16, 8)
self.max = 0.7
self.min = 0.0
class TestCase2(TestClipOp):
def initTestCase(self):
self.shape = (8, 16)
self.max = 1.0
self.min = 0.0
class TestCase3(TestClipOp):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max = 0.7
self.min = 0.2
class TestCase4(TestClipOp):
def initTestCase(self):
self.shape = (4, 8, 8)
self.max = 0.7
self.min = 0.2
self.inputs['Max'] = np.array([0.8]).astype('float32')
self.inputs['Min'] = np.array([0.3]).astype('float32')
class TestCase5(TestClipOp):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max = 0.5
self.min = 0.5
import op_test
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
class XPUTestClipOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'clip'
self.use_dynamic_create_class = False
class TestClipOp(XPUOpTest):
def setUp(self):
self.init_dtype()
self.set_xpu()
self.op_type = "clip"
self.place = paddle.XPUPlace(0)
self.inputs = {}
self.init_data()
self.set_attrs()
self.set_inputs()
self.outputs = {
'Out': np.clip(self.inputs['X'], self.min_v, self.max_v)
}
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.no_need_check_grad = True
self.__class__.op_type = self.dtype
def init_data(self):
self.shape = (4, 10, 10)
self.max = 0.8
self.min = 0.3
def set_inputs(self):
if 'Min' in self.inputs:
min_v = self.inputs['Min']
else:
min_v = self.attrs['min']
if 'Max' in self.inputs:
max_v = self.inputs['Max']
else:
max_v = self.attrs['max']
self.min_v = min_v
self.max_v = max_v
self.max_relative_error = 0.006
input = np.random.random(self.shape).astype("float32")
input[np.abs(input - min_v) < self.max_relative_error] = 0.5
input[np.abs(input - max_v) < self.max_relative_error] = 0.5
self.inputs['X'] = input
def set_attrs(self):
self.attrs = {}
self.attrs['min'] = self.min
self.attrs['max'] = self.max
def init_dtype(self):
self.dtype = self.in_type
def test_check_output(self):
paddle.enable_static()
self.check_output_with_place(self.place)
paddle.disable_static()
class TestClipOp1(TestClipOp):
def init_data(self):
self.shape = (8, 16, 8)
self.max = 0.7
self.min = 0.0
class TestClipOp2(TestClipOp):
def init_data(self):
self.shape = (8, 16)
self.max = 1.0
self.min = 0.0
class TestClipOp3(TestClipOp):
def init_data(self):
self.shape = (4, 8, 16)
self.max = 0.7
self.min = 0.2
class TestClipOp4(TestClipOp):
def init_data(self):
self.shape = (4, 8, 8)
self.max = 0.7
self.min = 0.2
self.inputs['Max'] = np.array([0.8]).astype('float32')
self.inputs['Min'] = np.array([0.3]).astype('float32')
class TestClipOp5(TestClipOp):
def init_data(self):
self.shape = (4, 8, 16)
self.max = 0.5
self.min = 0.5
class TestClipOpError(unittest.TestCase):
@@ -212,5 +223,9 @@ class TestInplaceClipAPI(TestClipAPI):
return x.clip_(min, max)
support_types = get_xpu_op_support_types('clip')
for stype in support_types:
create_test_class(globals(), XPUTestClipOp, stype)
if __name__ == '__main__':
unittest.main()
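For reference, here is a minimal NumPy sketch of the computation the clip tests above check against; the helper name and sample values are illustrative, not part of the patch. The nudge-to-0.5 step mirrors the tests' trick of moving inputs away from the clip boundaries, where the op's gradient has a kink and a numeric gradient check would be unstable.

```python
import numpy as np

# Illustrative reference for the clip tests (helper name is not from the patch).
def clip_reference(x, min_v, max_v, tol=0.006):
    x = x.copy()
    # Move values lying within `tol` of either boundary to 0.5, as the tests do,
    # so the finite-difference gradient check never straddles the clip kink.
    x[np.abs(x - min_v) < tol] = 0.5
    x[np.abs(x - max_v) < tol] = 0.5
    return x, np.clip(x, min_v, max_v)

x = np.random.random((4, 10, 10)).astype("float32")
x_safe, out = clip_reference(x, min_v=0.3, max_v=0.8)
assert out.min() >= 0.3 and out.max() <= 0.8
```

The registration loop above then lets create_test_class expand XPUTestClipOp into one concrete unittest class per dtype reported by get_xpu_op_support_types('clip').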
@@ -18,54 +18,78 @@ import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test_xpu import XPUOpTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle
from paddle.static import Program, program_guard
class TestXPUScaleOp(XPUOpTest):
def setUp(self):
self.op_type = "scale"
self.init_type()
self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
self.attrs = {'scale': -2.3, 'use_xpu': True}
self.outputs = {
'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
}
def init_type(self):
self.dtype = np.float32
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import compiler, Program, program_guard
# class TestXPUScaleOpInt64(TestXPUScaleOp):
# def init_type(self):
# self.dtype = np.int64
class TestScaleFp16Op(TestXPUScaleOp):
def init_dtype_type(self):
self.dtype = np.float16
def test_check_output(self):
place = core.XPUPlace(0)
self.check_output_with_place(place, atol=0.002)
def test_check_grad(self):
place = core.XPUPlace(0)
self.check_grad_with_place(place, ["X"], "Out", max_relative_error=0.05)
import op_test
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
class XPUTestScaleOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'scale'
self.use_dynamic_create_class = False
class TestScaleOp(XPUOpTest):
def setUp(self):
self.init_dtype()
self.set_xpu()
self.op_type = "scale"
self.place = paddle.XPUPlace(0)
self.set_inputs()
self.set_attrs()
self.outputs = {
'Out': self.inputs['X'] * self.dtype(self.attrs['scale'])
}
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.no_need_check_grad = True
self.__class__.op_type = self.dtype
def set_inputs(self):
self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
def init_dtype(self):
if "float16" == self.in_type:
self.dtype = np.float16
if "float32" == self.in_type:
self.dtype = np.float32
if "int64" == self.in_type:
self.dtype = np.int64
def set_attrs(self):
self.attrs = {'scale': -2.3}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
class TestScaleOp1(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': 3.5}
class TestScaleOp2(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': 6.77}
class TestScaleOp3(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': -9.19}
class TestScaleOp4(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': 0.0}
class TestScaleOp5(TestScaleOp):
def set_attrs(self):
self.attrs = {'scale': -0.003}
class TestScaleApiStatic(unittest.TestCase):
@@ -108,5 +132,9 @@ class TestScaleInplaceApiDygraph(TestScaleApiDygraph):
return x.scale_(scale, bias)
support_types = get_xpu_op_support_types('scale')
for stype in support_types:
create_test_class(globals(), XPUTestScaleOp, stype)
if __name__ == "__main__":
unittest.main()
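For reference, a minimal sketch of the expectation the scale tests above build: Out = scale * X, with bias left at its default of 0 in every case exercised here. The helper name is mine, and the bias / bias_after_scale handling is included only for completeness, following the public paddle.scale API rather than anything in this patch.

```python
import numpy as np

# Illustrative reference for the scale tests (helper name is not from the patch).
def scale_reference(x, scale, bias=0.0, bias_after_scale=True):
    if bias_after_scale:
        return scale * x + bias
    return scale * (x + bias)

x = np.random.random((10, 10)).astype(np.float32)
expected = x * np.float32(-2.3)  # exactly how TestScaleOp builds its 'Out'
assert np.allclose(scale_reference(x, -2.3), expected, rtol=1e-5)
```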
@@ -19,251 +19,255 @@ import numpy as np
import sys
sys.path.append("..")
from op_test_xpu import OpTest, XPUOpTest
from op_test import skip_check_grad_ci
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
from paddle.fluid import compiler, Program, program_guard, core
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
from scipy.special import logit
from scipy.special import expit
paddle.enable_static()
class TestSigmoidCrossEntropyWithLogitsOp1(XPUOpTest):
"""Test sigmoid_cross_entropy_with_logit_op with binary label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
batch_size = 64
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)),
'Label': np.random.randint(0, 2, (batch_size, num_classes))
.astype(self.dtype)
}
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
def set_xpu(self):
self.__class__.use_xpu = True
self.place = paddle.XPUPlace(0)
def init_dtype(self):
self.dtype = np.float32
class TestSigmoidCrossEntropyWithLogitsOp2(
TestSigmoidCrossEntropyWithLogitsOp1):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
batch_size = 64
num_classes = 20
ignore_index = -1
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)),
'Label': np.random.randint(-1, 2, (batch_size, num_classes))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index, }
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == ignore_index)] = 0
self.outputs = {'Out': out}
class TestSigmoidCrossEntropyWithLogitsOp3(
TestSigmoidCrossEntropyWithLogitsOp1):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
batch_size = 64
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)),
'Label': np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)
}
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
class TestSigmoidCrossEntropyWithLogitsOp4(
TestSigmoidCrossEntropyWithLogitsOp1):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
batch_size = 64
num_classes = 20
ignore_index = -1
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)),
'Label': np.random.randint(-1, 2, (batch_size, num_classes))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == ignore_index)] = 0
if self.attrs['normalize']:
out = out / float(
np.where(self.inputs['Label'] != ignore_index)[0].size)
self.outputs = {'Out': out}
class TestSigmoidCrossEntropyWithLogitsOp5(
TestSigmoidCrossEntropyWithLogitsOp1):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
batch_size = [10, 10]
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label': np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
class TestSigmoidCrossEntropyWithLogitsNorm(
TestSigmoidCrossEntropyWithLogitsOp1):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
batch_size = [10, 10]
num_classes = 20
ignore_index = -1
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label': np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == ignore_index)] = 0
if self.attrs['normalize']:
out = out / float(
np.where(self.inputs['Label'] != ignore_index)[0].size)
self.outputs = {'Out': out}
class TestSigmoidCrossEntropyWithLogitsOp6(
TestSigmoidCrossEntropyWithLogitsOp1):
"""Test sigmoid_cross_entropy_with_logit_op with binary label
"""
def setUp(self):
self.op_type = "sigmoid_cross_entropy_with_logits"
self.set_xpu()
self.init_dtype()
batch_size = [10, 10]
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label': np.random.randint(0, 2, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
class XPUTestSigmoidCrossEntropyWithLogitsOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = "sigmoid_cross_entropy_with_logits"
self.use_dynamic_create_class = False
class TestSigmoidCrossEntropyWithLogitsOp(XPUOpTest):
"""Test sigmoid_cross_entropy_with_logit_op with binary label
"""
def setUp(self):
self.set_xpu()
self.op_type = "sigmoid_cross_entropy_with_logits"
self.place = paddle.XPUPlace(0)
self.init_dtype()
self.set_inputs()
self.set_output()
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
def set_inputs(self):
batch_size = 64
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)),
'Label': np.random.randint(0, 2, (batch_size, num_classes))
.astype(self.dtype)
}
self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.op_type = self.in_type
self.place = paddle.XPUPlace(0)
def init_dtype(self):
self.dtype = self.in_type
class TestSigmoidCrossEntropyWithLogitsOp2(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def set_inputs(self):
batch_size = 64
num_classes = 20
ignore_index = -1
self.ignore_index = ignore_index
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)),
'Label': np.random.randint(-1, 2, (batch_size, num_classes))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
self.outputs = {'Out': out}
class TestSigmoidCrossEntropyWithLogitsOp3(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def set_inputs(self):
batch_size = 64
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)),
'Label': np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)
}
self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
class TestSigmoidCrossEntropyWithLogitsOp4(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def set_inputs(self):
batch_size = 64
num_classes = 20
ignore_index = -1
self.ignore_index = ignore_index
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype(self.dtype)),
'Label': np.random.randint(-1, 2, (batch_size, num_classes))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
if self.attrs['normalize']:
out = out / float(
np.where(self.inputs['Label'] != self.ignore_index)[0].size)
self.outputs = {'Out': out}
class TestSigmoidCrossEntropyWithLogitsOp5(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def set_inputs(self):
batch_size = [10, 10]
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label': np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
class TestSigmoidCrossEntropyWithLogitsOp6(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with binary label
"""
def set_inputs(self):
batch_size = [10, 10]
num_classes = 20
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label':
np.random.randint(0, 2, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
self.attrs = {'num_classes': num_classes, 'batch_size': batch_size}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
self.outputs = {'Out': -term1 - term2}
class TestSigmoidCrossEntropyWithLogitsNorm(
TestSigmoidCrossEntropyWithLogitsOp):
"""Test sigmoid_cross_entropy_with_logit_op with probabalistic label
"""
def set_inputs(self):
batch_size = [10, 10]
num_classes = 20
ignore_index = -1
self.ignore_index = ignore_index
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype(self.dtype)),
'Label':
np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
.astype(self.dtype)
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
def set_output(self):
# Fw Pass is implemented as elementwise sigmoid followed by
# elementwise logistic loss
# Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
sigmoid_X = expit(self.inputs['X'])
term1 = self.inputs['Label'] * np.log(sigmoid_X)
term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
out = -term1 - term2
out[np.where(self.inputs['Label'] == self.ignore_index)] = 0
if self.attrs['normalize']:
out = out / float(
np.where(self.inputs['Label'] != self.ignore_index)[0].size)
self.outputs = {'Out': out}
support_types = get_xpu_op_support_types('sigmoid_cross_entropy_with_logits')
for stype in support_types:
create_test_class(globals(), XPUTestSigmoidCrossEntropyWithLogitsOp, stype)
if __name__ == '__main__':
unittest.main()
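For reference, a minimal sketch of the loss these tests compute as their expected 'Out'. The helper name is mine; ignore_index zeroes the ignored positions, and normalize (used only together with ignore_index in the cases above) divides by the count of non-ignored elements, mirroring the set_output implementations.

```python
import numpy as np
from scipy.special import expit, logit

# Illustrative reference for the sigmoid_cross_entropy_with_logits tests:
#   loss = -Label * log(sigmoid(X)) - (1 - Label) * log(1 - sigmoid(X))
def sigmoid_ce_reference(x, label, ignore_index=None, normalize=False):
    sig = expit(x)
    out = -label * np.log(sig) - (1 - label) * np.log(1 - sig)
    if ignore_index is not None:
        out[label == ignore_index] = 0
        if normalize:
            # Divide by the number of positions that are not ignored.
            out = out / float(np.count_nonzero(label != ignore_index))
    return out

x = logit(np.random.uniform(0.1, 0.9, (64, 20)).astype("float32"))
label = np.random.randint(-1, 2, (64, 20)).astype("float32")
out = sigmoid_ce_reference(x, label, ignore_index=-1, normalize=True)
```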