Commit 9894a4fb authored by juncaipeng, committed by Tao Luo

update test precision from fp32 to fp64, test=develop (#21783)

Parent c96f06f2
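The change is mechanical: every affected test generates its NumPy inputs (and, where applicable, its expected outputs) in float64 instead of float32, so OpTest's numeric checks run in double precision. Below is a minimal sketch of the pattern, distilled from the elementwise_min hunk that follows; it uses plain NumPy only and is illustrative, not part of the commit.

import numpy as np

# Before this commit the data was generated in single precision, e.g.
#   x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
# After the change the same data is produced as float64:
x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
# Offsetting y from x by at least 0.1 keeps np.minimum(x, y) differentiable
# at every element, as the test's own comment explains.
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")

assert x.dtype == np.float64 and y.dtype == np.float64
assert np.abs(x - y).min() >= 0.1

Nothing about the test logic changes; only the dtype spellings ("float32", "float", "double", np.float32) are normalized to float64.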
@@ -25,9 +25,9 @@ class TestElementwiseOp(OpTest):
         # If x and y have the same value, the min() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
-        sgn = np.random.choice([-1, 1], [13, 17]).astype("float32")
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+        sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
+        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
@@ -49,8 +49,8 @@ class TestElementwiseOp(OpTest):
 class TestElementwiseMinOp_scalar(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.random_integers(-5, 5, [10, 3, 4]).astype("float32")
-        y = np.array([0.5]).astype("float32")
+        x = np.random.random_integers(-5, 5, [10, 3, 4]).astype("float64")
+        y = np.array([0.5]).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
@@ -58,9 +58,9 @@ class TestElementwiseMinOp_scalar(TestElementwiseOp):
 class TestElementwiseMinOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.random((100, )).astype("float32")
-        sgn = np.random.choice([-1, 1], (100, )).astype("float32")
-        y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
+        x = np.random.random((100, )).astype("float64")
+        sgn = np.random.choice([-1, 1], (100, )).astype("float64")
+        y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
@@ -68,10 +68,10 @@ class TestElementwiseMinOp_Vector(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (2, )).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (2, )).astype(np.float64)
         y = x[:, 0, 0] + sgn * \
-            np.random.uniform(1, 2, (2, )).astype(np.float32)
+            np.random.uniform(1, 2, (2, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
         self.attrs = {'axis': 0}
@@ -84,10 +84,10 @@ class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (3, )).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (3, )).astype(np.float64)
         y = x[0, :, 0] + sgn * \
-            np.random.uniform(1, 2, (3, )).astype(np.float32)
+            np.random.uniform(1, 2, (3, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
         self.attrs = {'axis': 1}
@@ -100,10 +100,10 @@ class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (4, )).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (4, )).astype(np.float64)
         y = x[0, 0, :] + sgn * \
-            np.random.uniform(1, 2, (4, )).astype(np.float32)
+            np.random.uniform(1, 2, (4, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {
@@ -115,10 +115,10 @@ class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (3, 4)).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (3, 4)).astype(np.float64)
         y = x[0, :, :, 0] + sgn * \
-            np.random.uniform(1, 2, (3, 4)).astype(np.float32)
+            np.random.uniform(1, 2, (3, 4)).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
         self.attrs = {'axis': 1}
@@ -131,10 +131,10 @@ class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float64)
         y = x + sgn * \
-            np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
+            np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
...
@@ -23,8 +23,8 @@ class TestElementwisePowOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [2, 3]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [2, 3]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@@ -39,8 +39,8 @@ class TestElementwisePowOp_scalar(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float32),
-            'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32)
+            'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float64),
+            'Y': np.random.uniform(0.1, 1, [1]).astype(np.float64)
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@@ -49,8 +49,8 @@ class TestElementwisePowOp_tensor(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [32]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [32]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [32]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [32]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@@ -59,8 +59,8 @@ class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [4]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [4]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@@ -69,8 +69,8 @@ class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [3]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [3]).astype("float64")
         }
         self.attrs = {'axis': 1}
         self.outputs = {
@@ -82,8 +82,8 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [2]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [2]).astype("float64")
         }
         self.attrs = {'axis': 0}
         self.outputs = {
@@ -95,8 +95,8 @@ class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [3, 4]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [3, 4]).astype("float64")
        }
         self.attrs = {'axis': 1}
         self.outputs = {
@@ -109,8 +109,8 @@ class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [2, 3, 1, 5]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [2, 3, 1, 5]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
...
@@ -27,7 +27,7 @@ class TestExpandOpRank1(OpTest):
         self.op_type = "expand"
         self.init_data()
-        self.inputs = {'X': np.random.random(self.ori_shape).astype("float32")}
+        self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
         self.attrs = {'expand_times': self.expand_times}
         output = np.tile(self.inputs['X'], self.expand_times)
         self.outputs = {'Out': output}
@@ -84,7 +84,7 @@ class TestExpandOpRank1_tensor_attr(OpTest):
                 (1)).astype('int32') * ele))
         self.inputs = {
-            'X': np.random.random(self.ori_shape).astype("float32"),
+            'X': np.random.random(self.ori_shape).astype("float64"),
             'expand_times_tensor': expand_times_tensor,
         }
         self.attrs = {"expand_times": self.infer_expand_times}
@@ -124,7 +124,7 @@ class TestExpandOpRank1_tensor(OpTest):
         self.init_data()
         self.inputs = {
-            'X': np.random.random(self.ori_shape).astype("float32"),
+            'X': np.random.random(self.ori_shape).astype("float64"),
             'ExpandTimes': np.array(self.expand_times).astype("int32"),
         }
         self.attrs = {}
...
@@ -24,7 +24,7 @@ class TestFlattenOp(OpTest):
     def setUp(self):
         self.op_type = "flatten2"
         self.init_test_case()
-        self.inputs = {"X": np.random.random(self.in_shape).astype("float32")}
+        self.inputs = {"X": np.random.random(self.in_shape).astype("float64")}
         self.init_attrs()
         self.outputs = {
             "Out": self.inputs["X"].reshape(self.new_shape),
...
@@ -24,7 +24,7 @@ class TestFlattenOp(OpTest):
     def setUp(self):
         self.op_type = "flatten"
         self.init_test_case()
-        self.inputs = {"X": np.random.random(self.in_shape).astype("float32")}
+        self.inputs = {"X": np.random.random(self.in_shape).astype("float64")}
         self.init_attrs()
         self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}
...
@@ -29,7 +29,7 @@ class TestFusedEmbeddingSeqPoolOp(OpTest):
     def setUp(self):
         self.op_type = "fused_embedding_seq_pool"
         self.emb_size = 6
-        self.table = np.random.random((17, self.emb_size)).astype("float32")
+        self.table = np.random.random((17, self.emb_size)).astype("float64")
         self.ids = np.array([[[4], [3]], [[4], [3]], [[2], [1]],
                             [[16], [1]]]).astype("int64")
         ids_expand = np.expand_dims(self.ids, axis=1)
...
@@ -27,7 +27,7 @@ class TestGatherNdOpWithEmptyIndex(OpTest):
     def setUp(self):
         self.op_type = "gather_nd"
-        xnp = np.random.random((5, 20)).astype("float32")
+        xnp = np.random.random((5, 20)).astype("float64")
         self.inputs = {'X': xnp, 'Index': np.array([[], []]).astype("int32")}
         self.outputs = {
             'Out': np.vstack((xnp[np.newaxis, :], xnp[np.newaxis, :]))
@@ -48,7 +48,7 @@ class TestGatherNdOpWithLowIndex(OpTest):
     def setUp(self):
         self.op_type = "gather_nd"
         xnp = np.array(
-            [[65, 17, 2], [14, 25, 1], [76, 22, 3]]).astype("float32")
+            [[65, 17, 2], [14, 25, 1], [76, 22, 3]]).astype("float64")
         index = np.array([[1], [2]]).astype("int64")
         self.inputs = {'X': xnp, 'Index': index}
@@ -91,7 +91,7 @@ class TestGatherNdOpWithHighRankSame(OpTest):
     def setUp(self):
         self.op_type = "gather_nd"
         shape = (20, 9, 8, 1, 31)
-        xnp = np.random.rand(*shape)
+        xnp = np.random.rand(*shape).astype("float64")
         index = np.vstack([np.random.randint(0, s, size=150) for s in shape]).T
         self.inputs = {'X': xnp, 'Index': index.astype("int32")}
@@ -112,7 +112,7 @@ class TestGatherNdOpWithHighRankDiff(OpTest):
     def setUp(self):
         self.op_type = "gather_nd"
         shape = (20, 9, 8, 1, 31)
-        xnp = np.random.rand(*shape).astype("double")
+        xnp = np.random.rand(*shape).astype("float64")
         index = np.vstack([np.random.randint(0, s, size=1000) for s in shape]).T
         index_re = index.reshape([10, 5, 20, 5])
...
@@ -41,7 +41,7 @@ class TestGatherOp(OpTest):
         For multi-dimension input
         """
         self.x_shape = (10, 20)
-        self.x_type = "float32"
+        self.x_type = "float64"
         self.index = [1, 3, 5]
         self.index_type = "int32"
@@ -52,7 +52,7 @@ class TestCase1(TestGatherOp):
         For one dimension input
         """
         self.x_shape = (100)
-        self.x_type = "float32"
+        self.x_type = "float64"
         self.index = [1, 3, 5]
         self.index_type = "int32"
@@ -63,7 +63,7 @@ class TestCase2(TestGatherOp):
         For int64_t index type
         """
         self.x_shape = (10)
-        self.x_type = "float32"
+        self.x_type = "float64"
         self.index = [1, 3, 5]
         self.index_type = "int64"
@@ -74,7 +74,7 @@ class TestCase3(TestGatherOp):
         For other input type
         """
         self.x_shape = (10, 20)
-        self.x_type = "double"
+        self.x_type = "float64"
         self.index = [1, 3, 5]
         self.index_type = "int64"
@@ -92,7 +92,7 @@ class TestCase5(TestGatherOp):
     def config(self):
         self.x_shape = (10, 20)
         self.attrs = {'overwrite': False}
-        self.x_type = "float"
+        self.x_type = "float64"
         self.index = [1, 1, 3]
         self.index_type = "int32"
@@ -101,7 +101,7 @@ class TestCase6(TestGatherOp):
     def config(self):
         self.x_shape = (10, 20)
         self.attrs = {'overwrite': True}
-        self.x_type = "float"
+        self.x_type = "float64"
         self.index = [1, 3]
         self.index_type = "int32"
...
@@ -34,7 +34,7 @@ def AffineGrid(theta, size):
     for i in range(len(theta)):
         ret[i] = np.dot(grid[i].reshape([h * w, 3]), theta[i])
-    return ret.reshape([n, h, w, 2]).astype("float32")
+    return ret.reshape([n, h, w, 2]).astype("float64")
 def getGridPointValue(data, x, y):
@@ -43,7 +43,7 @@ def getGridPointValue(data, x, y):
     H = data_shape[2]
     W = data_shape[3]
-    out = np.zeros(data_shape, dtype='float')
+    out = np.zeros(data_shape, dtype='float64')
     for i in range(N):
         for j in range(H):
             for k in range(W):
@@ -68,8 +68,8 @@ def GridSampler(data, grid):
     y_max = H - 1
     x_max = W - 1
-    x = 0.5 * ((x.astype('float32') + 1.0) * x_max)
-    y = 0.5 * ((y.astype('float32') + 1.0) * y_max)
+    x = 0.5 * ((x.astype('float64') + 1.0) * x_max)
+    y = 0.5 * ((y.astype('float64') + 1.0) * y_max)
     x0 = np.floor(x).astype('int32')
     x1 = x0 + 1
@@ -86,7 +86,7 @@ def GridSampler(data, grid):
     vc = getGridPointValue(data, x1, y0)
     vd = getGridPointValue(data, x1, y1)
-    out = (wa * va + wb * vb + wc * vc + wd * vd).astype('float32')
+    out = (wa * va + wb * vb + wc * vc + wd * vd).astype('float64')
     return out
@@ -94,9 +94,9 @@ class TestGridSamplerOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'grid_sampler'
-        x = np.random.randint(0, 255, self.x_shape).astype('float32')
-        theta = np.zeros(self.theta_shape).astype('float32')
+        x = np.random.randint(0, 255, self.x_shape).astype('float64')
+        theta = np.zeros(self.theta_shape).astype('float64')
         for i in range(self.theta_shape[0]):
             for j in range(2):
                 for k in range(3):
...
@@ -170,20 +170,20 @@ class TestGRUOriginMode(TestGRUOp):
 class TestGRUOp2(TestGRUOp):
     def set_confs(self):
         self.D = 19
-        self.dtype = 'float32'
+        self.dtype = 'float64'
 class TestGRUOp2Len0(TestGRUOp):
     def set_confs(self):
         self.D = 19
         self.lod = [[2, 0, 4]]
-        self.dtype = 'float32'
+        self.dtype = 'float64'
 class TestGRUOp2OriginMode(TestGRUOp):
     def set_confs(self):
         self.D = 19
-        self.dtype = 'float32'
+        self.dtype = 'float64'
         self.origin_mode = True
@@ -191,7 +191,7 @@ class TestGRUOp2OriginModeLen0(TestGRUOp):
     def set_confs(self):
         self.D = 19
         self.lod = [[0, 3, 4]]
-        self.dtype = 'float32'
+        self.dtype = 'float64'
         self.origin_mode = True
@@ -199,7 +199,7 @@ class TestGRUOp2OriginModeLastLen0(TestGRUOp):
     def set_confs(self):
         self.D = 19
         self.lod = [[0, 3, 0]]
-        self.dtype = 'float32'
+        self.dtype = 'float64'
         self.origin_mode = True
...
@@ -36,8 +36,8 @@ class TestKLDivLossOp(OpTest):
     def setUp(self):
         self.initTestCase()
         self.op_type = 'kldiv_loss'
-        x = np.random.uniform(-10, 10, self.x_shape).astype('float32')
-        target = np.random.uniform(-10, 10, self.x_shape).astype('float32')
+        x = np.random.uniform(-10, 10, self.x_shape).astype('float64')
+        target = np.random.uniform(-10, 10, self.x_shape).astype('float64')
         self.attrs = {"reduction": self.reduction}
@@ -46,7 +46,7 @@ class TestKLDivLossOp(OpTest):
             'Target': target,
         }
         loss = kldiv_loss(x, target, self.reduction)
-        self.outputs = {'Loss': loss.astype('float32')}
+        self.outputs = {'Loss': loss.astype('float64')}
     def test_check_output(self):
         self.check_output()
...
@@ -22,7 +22,7 @@ from op_test import OpTest
 class TestLodResetOpByAttr(OpTest):
     def setUp(self):
         self.op_type = "lod_reset"
-        x = np.random.random((10, 20)).astype("float32")
+        x = np.random.random((10, 20)).astype("float64")
         lod = [[3, 2, 5]]
         # target_offset_lod and target_lod are the same lod info represented
         # in offset-based format and length-based format, respectively.
@@ -45,7 +45,7 @@ class TestLodResetOpByAttr(OpTest):
 class TestLodResetOpByInput(OpTest):
     def setUp(self):
         self.op_type = "lod_reset"
-        x = np.random.random((10, 20)).astype("float32")
+        x = np.random.random((10, 20)).astype("float64")
         lod = [[3, 2, 5]]
         # target_offset_lod and target_lod are the same lod info represented
         # in offset-based format and length-based format, respectively.
@@ -69,7 +69,7 @@ class TestLodResetOpByInput(OpTest):
 class TestLodResetOpBoth(OpTest):
     def setUp(self):
         self.op_type = "lod_reset"
-        x = np.random.random((10, 20)).astype("float32")
+        x = np.random.random((10, 20)).astype("float64")
         lod = [[3, 2, 5]]
         target_offset_lod_attr = [0, 7, 10]
         target_offset_lod_in = [0, 4, 7, 10]
@@ -93,9 +93,9 @@ class TestLodResetOpBoth(OpTest):
 class TestLodResetOpYIsLoDTensor(OpTest):
     def setUp(self):
         self.op_type = "lod_reset"
-        x = np.random.random((10, 20)).astype("float32")
+        x = np.random.random((10, 20)).astype("float64")
         lod = [[3, 2, 5]]
-        y = np.random.random((10, 10)).astype("float32")
+        y = np.random.random((10, 10)).astype("float64")
         target_lod = [[4, 3, 3]]
         self.inputs = {'X': (x, lod), 'Y': (y, target_lod)}
         self.outputs = {'Out': (x, target_lod)}
@@ -112,7 +112,7 @@ class TestLodResetOpYIsLoDTensor(OpTest):
 class TestLodAppendOpByAttr(OpTest):
     def setUp(self):
         self.op_type = "lod_reset"
-        x = np.random.random((10, 20)).astype("float32")
+        x = np.random.random((10, 20)).astype("float64")
         lod = [[3, 2, 5]]
         # target_offset_lod and target_lod are the same lod info represented
         # in offset-based format and length-based format, respectively.
...
@@ -27,7 +27,7 @@ from paddle.fluid import Program, program_guard
 class TestLookupTableOp(OpTest):
     def setUp(self):
         self.op_type = "lookup_table"
-        table = np.random.random((17, 31)).astype("float32")
+        table = np.random.random((17, 31)).astype("float64")
         ids = np.random.randint(0, 17, 4).astype("int64")
         ids_expand = np.expand_dims(ids, axis=1)
         self.inputs = {'W': table, 'Ids': ids_expand}
@@ -43,7 +43,7 @@ class TestLookupTableOp(OpTest):
 class TestLookupTableOpWithTensorIds(OpTest):
     def setUp(self):
         self.op_type = "lookup_table"
-        table = np.random.random((17, 31)).astype("float32")
+        table = np.random.random((17, 31)).astype("float64")
         ids = np.random.randint(
             low=0, high=17, size=(2, 4, 5, 1)).astype("int64")
         self.inputs = {'W': table, 'Ids': ids}
...
@@ -28,7 +28,7 @@ from paddle.fluid import Program, program_guard
 class TestLookupTableOp(OpTest):
     def setUp(self):
         self.op_type = "lookup_table_v2"
-        table = np.random.random((17, 31)).astype("float32")
+        table = np.random.random((17, 31)).astype("float64")
         ids = np.random.randint(0, 17, 4).astype("int64")
         self.inputs = {'W': table, 'Ids': ids}
         self.outputs = {'Out': table[ids]}
@@ -43,7 +43,7 @@ class TestLookupTableOp(OpTest):
 class TestLookupTableOpWithTensorIds(OpTest):
     def setUp(self):
         self.op_type = "lookup_table_v2"
-        table = np.random.random((17, 31)).astype("float32")
+        table = np.random.random((17, 31)).astype("float64")
         ids = np.random.randint(low=0, high=17, size=(2, 4, 5)).astype("int64")
         self.inputs = {'W': table, 'Ids': ids}
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}
...