提交 15627e48 编写于 作者: Q qijun

fix gpu test bug

上级 090b8114
...@@ -197,7 +197,7 @@ class OpTest(unittest.TestCase): ...@@ -197,7 +197,7 @@ class OpTest(unittest.TestCase):
def check_output(self): def check_output(self):
places = [core.CPUPlace()] places = [core.CPUPlace()]
if core.is_compile_gpu() and self.op.support_gpu(): if core.is_compile_gpu():
places.append(core.GPUPlace(0)) places.append(core.GPUPlace(0))
for place in places: for place in places:
self.check_output_with_place(place) self.check_output_with_place(place)
...@@ -270,6 +270,6 @@ class OpTest(unittest.TestCase): ...@@ -270,6 +270,6 @@ class OpTest(unittest.TestCase):
for c_grad, g_grad, name in itertools.izip( for c_grad, g_grad, name in itertools.izip(
cpu_analytic_grads, gpu_analytic_grads, grad_names): cpu_analytic_grads, gpu_analytic_grads, grad_names):
self.assertTrue( self.assertTrue(
numpy.allclose( np.allclose(
c_grad, g_grad, atol=1e-4), c_grad, g_grad, atol=1e-4),
"output name: " + name + " has diff") "output name: " + name + " has diff")
...@@ -8,7 +8,7 @@ class TestCrossEntropy(OpTest): ...@@ -8,7 +8,7 @@ class TestCrossEntropy(OpTest):
self.op_type = "onehot_cross_entropy" self.op_type = "onehot_cross_entropy"
batch_size = 30 batch_size = 30
class_num = 10 class_num = 10
X = numpy.random.random((batch_size, class_num)).astype("float32") X = numpy.random.uniform(0.1, 1.0, [batch_size, class_num]).astype("float32")
label = (class_num / 2) * numpy.ones(batch_size).astype("int32") label = (class_num / 2) * numpy.ones(batch_size).astype("int32")
self.inputs = {'X': X, 'label': label} self.inputs = {'X': X, 'label': label}
Y = [] Y = []
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册