Unverified commit 6de75082, authored by zhupengyang, committed by GitHub

fix test_hsigmoid windows ci (#25311)

Parent: e3223ad7
@@ -98,7 +98,7 @@ inline int clz(const T& value) {
   }
 }
-inline size_t FindLastSet(size_t x) { return sizeof(size_t) * 8 - clz(x); }
+inline size_t FindLastSet(size_t x) { return 1 + sizeof(size_t) * 8 - clz(x); }
 #endif // !_WIN32
 class SimpleCode {
  public:
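Note on the `FindLastSet` change: the function is expected to return the 1-based position of the highest set bit of `x` (equivalently, the number of bits needed to represent `x`), which the test's `find_latest_set` helper mirrors; the added `1 +` presumably brings the Windows `clz` fallback in line with that contract. A minimal Python sketch of the expected behavior (the helper name `find_last_set` is illustrative, not Paddle's API):

```python
# Illustrative contract for FindLastSet, expressed via int.bit_length():
# the 1-based index of the highest set bit, with 0 for x == 0.
def find_last_set(x: int) -> int:
    return x.bit_length()

# hsigmoid's default-tree code length is find_last_set(num_classes - 1),
# e.g. num_classes = 101 gives find_last_set(100) == 7 (100 == 0b1100100).
assert find_last_set(100) == 7
assert find_last_set(1) == 1
assert find_last_set(0) == 0
```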
@@ -58,7 +58,6 @@ if(WIN32)
     LIST(REMOVE_ITEM TEST_OPS test_debugger)
     list(REMOVE_ITEM TEST_OPS test_desc_clone)
     list(REMOVE_ITEM TEST_OPS test_fake_init_op)
-    list(REMOVE_ITEM TEST_OPS test_hsigmoid_op)
     list(REMOVE_ITEM TEST_OPS test_merge_ids_op)
     list(REMOVE_ITEM TEST_OPS test_split_ids_op)
     list(REMOVE_ITEM TEST_OPS test_program_code)
@@ -70,9 +70,9 @@ def hsigmoid(x, w, label, bias, num_classes):
     batch_size = x.shape[0]
     code_length = find_latest_set(num_classes - 1)
     code_table = [0 for _ in range(code_length)]
-    pre_output = np.zeros((batch_size, code_length))
-    pre_sum = np.zeros((batch_size, 1))
-    out = np.zeros((batch_size, 1))
+    pre_output = np.zeros((batch_size, code_length)).astype('float64')
+    pre_sum = np.zeros((batch_size, 1)).astype('float64')
+    out = np.zeros((batch_size, 1)).astype('float64')
     for i in range(batch_size):
         code_table = CodeTable(num_classes, label[i])
         length = code_table.get_length()
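A note on these casts: `np.zeros` (and `np.random.uniform` below) already return float64 on every platform, so the `.astype('float64')` calls make the reference implementation's dtype explicit rather than change it; the platform-sensitive casts in this commit are the integer ones in the test inputs further down. A quick sanity check, assuming only NumPy:

```python
import numpy as np

# float64 is already the default for these constructors on all platforms;
# the explicit .astype('float64') just pins the reference dtype.
assert np.zeros((2, 3)).dtype == np.float64
assert np.random.uniform(-1, 1, (2, 3)).dtype == np.float64
```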
@@ -105,9 +105,9 @@ def hsigmoid(x, w, label, bias, num_classes):
 def hsigmoid_grad(x, w, label, bias, num_classes):
     batch_size = x.shape[0]
-    dx = np.zeros(x.shape)
-    dw = np.zeros(w.shape)
-    db = np.zeros(bias.shape)
+    dx = np.zeros(x.shape).astype('float64')
+    dw = np.zeros(w.shape).astype('float64')
+    db = np.zeros(bias.shape).astype('float64')
     for i in range(batch_size):
         code_table = CodeTable(num_classes, label[i])
         length = code_table.get_length()
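For context on what `hsigmoid_grad` computes: hierarchical sigmoid treats every non-leaf node on a label's path as an independent binary logistic classifier, so for a node with pre-activation z and target bit d, the local loss is binary cross-entropy and its gradient with respect to z is sigmoid(z) - d. A standalone numeric check of that identity (illustrative, not Paddle code):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def node_loss(z, d):
    # Binary cross-entropy at one path node with target bit d.
    s = sigmoid(z)
    return -d * np.log(s) - (1 - d) * np.log(1 - s)

# The analytic gradient sigmoid(z) - d matches a central finite difference.
z, d, eps = 0.3, 1.0, 1e-6
numeric = (node_loss(z + eps, d) - node_loss(z - eps, d)) / (2 * eps)
assert abs((sigmoid(z) - d) - numeric) < 1e-8
```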
@@ -133,9 +133,9 @@ def hsigmoidWithCustomTree(x, w, path_table, path_code, label, bias,
     code_length = len(path_table[0])
     code_table = [0 for _ in range(code_length)]
     # init pre_out with shape [N, code_length]
-    pre_output = np.zeros((batch_size, code_length))
-    pre_sum = np.zeros((batch_size, 1))
-    out = np.zeros((batch_size, 1))
+    pre_output = np.zeros((batch_size, code_length)).astype('float64')
+    pre_sum = np.zeros((batch_size, 1)).astype('float64')
+    out = np.zeros((batch_size, 1)).astype('float64')
     if isinstance(bias, np.ndarray):
         for i in range(batch_size):
             code_table = CodeTableWithCustomTree(path_table, path_code, i)
@@ -173,10 +173,13 @@ class TestHSigmoidOp(OpTest):
         num_classes = 101
         feature_size = 5
         batch_size = 20
-        x = np.random.uniform(-1, 1, (batch_size, feature_size))
-        w = np.random.uniform(-1, 1, (num_classes - 1, feature_size))
-        label = np.random.randint(0, num_classes, (batch_size, 1))
-        bias = np.random.uniform(-1, 1, (num_classes - 1, 1))
+        x = np.random.uniform(-1, 1,
+                              (batch_size, feature_size)).astype('float64')
+        w = np.random.uniform(-1, 1,
+                              (num_classes - 1, feature_size)).astype('float64')
+        label = np.random.randint(0, num_classes,
+                                  (batch_size, 1)).astype('int64')
+        bias = np.random.uniform(-1, 1, (num_classes - 1, 1)).astype('float64')
         self.attrs = {'num_classes': num_classes, 'is_sparse': False}
         self.inputs = {'X': x, 'W': w, 'Label': label, 'Bias': bias}
         pre_output, out = hsigmoid(x, w, label, bias, num_classes)
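The `label` cast is the part of this hunk that actually differs across platforms: `np.random.randint` returns NumPy's default integer type (`np.int_`, a C `long`), which is 32-bit on Windows but 64-bit on typical Linux builds, while the op's `Label` input must be int64. A minimal illustration, assuming only NumPy:

```python
import numpy as np

# np.random.randint defaults to np.int_ (C long): int64 on most Linux
# builds but int32 on Windows -- the source of the Windows CI failure.
label = np.random.randint(0, 101, (20, 1))
print(label.dtype)  # int32 on Windows, int64 on Linux/macOS

label = label.astype('int64')  # portable: always int64
assert label.dtype == np.int64
```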
@@ -189,7 +192,6 @@ class TestHSigmoidOp(OpTest):
     def test_check_grad(self):
         self.check_grad(
             ['X', 'W', 'Bias'], ['Out'], user_defined_grads=self.user_grads)
-        #self.check_grad(['X', 'W', 'Bias'], ['Out'])
 
 @skip_check_grad_ci(
@@ -203,13 +205,15 @@ class TestHSigmoidOpSparse(OpTest):
         batch_size = 4
         x = np.random.random((batch_size, feature_size))
         w = np.random.random((num_classes - 1, feature_size))
-        label = np.array([0, 1, 4, 5])
-        path_table = np.array(
-            [(0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1),
-             (0, 2, -1, -1,
-              -1)]) #np.array to store 1,2,5,6s' non-leaf path(root -> leaf)
-        path_code = np.array([(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), (
-            1, 0, 0, -1, -1), (0, 1, -1, -1, -1)]) #np.array to store
+        label = np.array([0, 1, 4, 5]).astype('int64')
+        path_table = np.array([
+            (0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1), (0, 2, -1,
+                                                                       -1, -1)
+        ]).astype(
+            'int64') #np.array to store 1,2,5,6s' non-leaf path(root -> leaf)
+        path_code = np.array(
+            [(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), (1, 0, 0, -1, -1),
+             (0, 1, -1, -1, -1)]).astype('int64') #np.array to store
         bias = np.random.random((num_classes - 1, 1))
         self.attrs = {'num_classes': num_classes, 'is_sparse': True}
         self.inputs = {
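For readers of the custom-tree tests: row i of `path_table` lists the indices of the non-leaf nodes on sample i's root-to-leaf path, padded with -1, and the matching row of `path_code` holds the 0/1 branch taken at each of those nodes (padded the same way). A small sketch that walks one row (the helper `decode_path` is illustrative):

```python
import numpy as np

def decode_path(table_row, code_row):
    """Yield (non_leaf_node, branch_bit) pairs, stopping at the -1 padding."""
    for node, code in zip(table_row, code_row):
        if node == -1:  # -1 pads paths shorter than the longest one
            break
        yield int(node), int(code)

path_table = np.array([(0, 2, -1, -1, -1)]).astype('int64')
path_code = np.array([(0, 0, -1, -1, -1)]).astype('int64')
print(list(decode_path(path_table[0], path_code[0])))  # [(0, 0), (2, 0)]
```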
@@ -265,9 +269,9 @@ class TestHSigmoidOpWithSparseGrad(unittest.TestCase):
         start_up = fluid.default_startup_program()
         start_up.random_seed = 1 # Fix random seed
         x = np.arange(6).reshape(6)
-        path_table = np.array([(1, 2, -1), (1, 2, -1)])
-        path_code = np.array([(1, 0, -1), (0, 0, -1)])
-        label = np.array([1, 4])
+        path_table = np.array([(1, 2, -1), (1, 2, -1)]).astype('int64')
+        path_code = np.array([(1, 0, -1), (0, 0, -1)]).astype('int64')
+        label = np.array([1, 4]).astype('int64')
         loss, data_list = self.hs_net_conf(is_sparse)
         optimizer = fluid.optimizer.SGD(learning_rate=1e-3)
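These casts matter here because the arrays are fed into a fluid program whose integer inputs are declared as int64; feeding Windows' default int32 arrays can fail the feed-time dtype check. A hedged sketch of the kind of declarations the test's `hs_net_conf` presumably sets up (names are illustrative):

```python
import paddle.fluid as fluid

# Integer inputs consumed by hsigmoid are declared int64, so the numpy
# arrays fed into them must be int64 as well.
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
path_table = fluid.layers.data(name='path_table', shape=[3], dtype='int64')
path_code = fluid.layers.data(name='path_code', shape=[3], dtype='int64')
```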
@@ -307,13 +311,15 @@ class TestHSigmoidOpWithCostumTree(OpTest):
         batch_size = 4
         x = np.random.uniform(-1, 1, (batch_size, feature_size))
         w = np.random.uniform(-1, 1, (num_classes - 1, feature_size))
-        label = np.array([0, 1, 4, 5])
-        path_table = np.array(
-            [(0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1),
-             (0, 2, -1, -1,
-              -1)]) #np.array to store 1,2,5,6s' non-leaf path(root -> leaf)
-        path_code = np.array([(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), (
-            1, 0, 0, -1, -1), (0, 1, -1, -1, -1)]) #np.array to store
+        label = np.array([0, 1, 4, 5]).astype('int64')
+        path_table = np.array([
+            (0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1), (0, 2, -1,
+                                                                       -1, -1)
+        ]).astype(
+            'int64') #np.array to store 1,2,5,6s' non-leaf path(root -> leaf)
+        path_code = np.array(
+            [(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), (1, 0, 0, -1, -1),
+             (0, 1, -1, -1, -1)]).astype('int64') #np.array to store
         bias = np.random.random((num_classes - 1, 1))
         self.attrs = {'num_classes': num_classes, 'is_sparse': False}
         self.inputs = {
@@ -346,13 +352,15 @@ class TestHSigmoidOpWithCostumTreeWithoutBias(OpTest):
         batch_size = 4
         x = np.random.uniform(-1, 1, (batch_size, feature_size))
         w = np.random.uniform(-1, 1, (num_classes - 1, feature_size))
-        label = np.array([0, 1, 4, 5])
-        path_table = np.array(
-            [(0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1),
-             (0, 2, -1, -1,
-              -1)]) #np.array to store 1,2,5,6s' non-leaf path(root -> leaf)
-        path_code = np.array([(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), (
-            1, 0, 0, -1, -1), (0, 1, -1, -1, -1)]) #np.array to store
+        label = np.array([0, 1, 4, 5]).astype('int64')
+        path_table = np.array([
+            (0, 2, -1, -1, -1), (0, 1, 3, -1, -1), (0, 1, 4, -1, -1), (0, 2, -1,
+                                                                       -1, -1)
+        ]).astype(
+            'int64') #np.array to store 1,2,5,6s' non-leaf path(root -> leaf)
+        path_code = np.array(
+            [(0, 0, -1, -1, -1), (1, 1, 1, -1, -1), (1, 0, 0, -1, -1),
+             (0, 1, -1, -1, -1)]).astype('int64') #np.array to store
         # bias = np.random.random((num_classes - 1, 1)).astype("float32")
         self.attrs = {'num_classes': num_classes, 'is_sparse': False}
         self.inputs = {