Commit c540aa04 authored by dangqingqing

Refine unit test in op_test_util

Collect each test's tensors into self.inputs / self.outputs (and, where the
operator takes attributes, self.attrs) dicts keyed by the operator's declared
names, instead of storing them as ad-hoc attributes, and raise a clear error
when a declared output is missing.

Parent 6512893b
@@ -33,23 +33,28 @@ class OpTestMeta(type):
             for place in places:
                 for in_name in func.all_input_args:
-                    if hasattr(self, in_name):
+                    if hasattr(self, "inputs") and in_name in self.inputs:
                         kwargs[in_name] = in_name
                         var = scope.new_var(in_name).get_tensor()
-                        arr = getattr(self, in_name)
+                        arr = self.inputs[in_name]
                         var.set_dims(arr.shape)
                         var.set(arr, place)
                     else:
                         kwargs[in_name] = "@EMPTY@"
 
                 for out_name in func.all_output_args:
-                    if hasattr(self, out_name):
-                        kwargs[out_name] = out_name
-                        scope.new_var(out_name).get_tensor()
+                    if not hasattr(self, "outputs"):
+                        raise ValueError(
+                            "The test op must set the self.outputs dict.")
+                    if out_name not in self.outputs:
+                        raise ValueError(
+                            "The %s is not in the self.outputs dict." % out_name)
+                    kwargs[out_name] = out_name
+                    scope.new_var(out_name).get_tensor()
 
                 for attr_name in func.all_attr_args:
-                    if hasattr(self, attr_name):
-                        kwargs[attr_name] = getattr(self, attr_name)
+                    if hasattr(self, "attrs") and attr_name in self.attrs:
+                        kwargs[attr_name] = self.attrs[attr_name]
 
                 op = func(**kwargs)
@@ -60,7 +65,7 @@ class OpTestMeta(type):
             for out_name in func.all_output_args:
                 actual = numpy.array(scope.find_var(out_name).get_tensor())
-                expect = getattr(self, out_name)
+                expect = self.outputs[out_name]
                 # TODO(qijun) The default decimal is 7, but numpy.dot and eigen.mul
                 # has some diff, and could not pass unittest. So I set decimal 3 here.
                 # And I will check this in future.
......
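After this change, an operator test no longer stores tensors as individual
attributes such as self.X or self.Out; it declares self.inputs, self.outputs,
and (when the operator takes attributes) self.attrs as dicts keyed by the
operator's declared input, output, and attribute names. A minimal sketch of
the pattern, using a hypothetical "scale" operator purely for illustration —
the operator name, its X/Out/scale names, and the import path are assumptions,
not part of this commit:

import unittest
import numpy
from op_test_util import OpTestMeta


class TestScaleOp(unittest.TestCase):
    # OpTestMeta generates the actual test body from the dicts set in setUp.
    __metaclass__ = OpTestMeta

    def setUp(self):
        # "scale" is a hypothetical operator used only to show the new
        # inputs/outputs/attrs convention.
        self.type = "scale"
        self.inputs = {'X': numpy.random.random((32, 32)).astype("float32")}
        self.attrs = {'scale': 0.5}
        self.outputs = {'Out': self.attrs['scale'] * self.inputs['X']}

The per-file diffs below migrate the existing tests to exactly this layout.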
@@ -12,9 +12,11 @@ class TestAddOp(unittest.TestCase):
     def setUp(self):
         self.type = "add_two"
-        self.X = numpy.random.random((102, 105)).astype("float32")
-        self.Y = numpy.random.random((102, 105)).astype("float32")
-        self.Out = self.X + self.Y
+        self.inputs = {
+            'X': numpy.random.random((102, 105)).astype("float32"),
+            'Y': numpy.random.random((102, 105)).astype("float32")
+        }
+        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
 
 
 class TestAddGradOp(unittest.TestCase):
......
@@ -7,15 +7,17 @@ class TestSGD(unittest.TestCase):
     __metaclass__ = OpTestMeta
 
     def setUp(self):
+        # TODO: this unit test does not pass yet
         self.type = "onehot_cross_entropy"
         batch_size = 100
         class_num = 10
-        self.X = numpy.random.random((batch_size, class_num)).astype("float32")
-        self.label = 5 * numpy.ones(batch_size).astype("int32")
+        X = numpy.random.random((batch_size, class_num)).astype("float32")
+        label = 5 * numpy.ones(batch_size).astype("int32")
+        self.inputs = {'X': X, 'label': label}
         Y = []
         for i in range(0, batch_size):
-            Y.append(-numpy.log(self.X[i][self.label[i]]))
-        self.Y = numpy.array(Y).astype("float32")
+            Y.append(-numpy.log(X[i][label[i]]))
+        self.outputs = {'Y': numpy.array(Y).astype("float32")}
 
 
 if __name__ == "__main__":
......
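A side note on the loop above: the per-example lookup can also be written with
NumPy integer indexing. A small equivalent sketch — an alternative form, not
this commit's code:

import numpy

batch_size, class_num = 100, 10
X = numpy.random.random((batch_size, class_num)).astype("float32")
label = 5 * numpy.ones(batch_size).astype("int32")

# Pick each row's labeled-class probability in one shot, then take -log.
# This computes the same Y as the Python loop in the hunk above.
Y = (-numpy.log(X[numpy.arange(batch_size), label])).astype("float32")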
@@ -8,8 +8,8 @@ class TestMeanOp(unittest.TestCase):
     def setUp(self):
         self.type = "mean"
-        self.X = np.random.random((32, 784)).astype("float32")
-        self.Out = np.mean(self.X)
+        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.outputs = {'Out': np.mean(self.inputs['X'])}
 
 
 if __name__ == '__main__':
......
@@ -8,9 +8,11 @@ class TestMulOp(unittest.TestCase):
     def setUp(self):
         self.type = "mul"
-        self.X = np.random.random((32, 84)).astype("float32")
-        self.Y = np.random.random((84, 100)).astype("float32")
-        self.Out = np.dot(self.X, self.Y)
+        self.inputs = {
+            'X': np.random.random((32, 84)).astype("float32"),
+            'Y': np.random.random((84, 100)).astype("float32")
+        }
+        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
 
 
 if __name__ == '__main__':
......
@@ -8,9 +8,11 @@ class TestRowwiseAddOp(unittest.TestCase):
     def setUp(self):
         self.type = "rowwise_add"
-        self.X = np.random.random((32, 84)).astype("float32")
-        self.b = np.random.random(84).astype("float32")
-        self.Out = np.add(self.X, self.b)
+        self.inputs = {
+            'X': np.random.random((32, 84)).astype("float32"),
+            'b': np.random.random(84).astype("float32")
+        }
+        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
 
 
 if __name__ == '__main__':
......
@@ -8,10 +8,13 @@ class TestSGD(unittest.TestCase):
     def setUp(self):
         self.type = "sgd"
-        self.param = numpy.random.random((102, 105)).astype("float32")
-        self.grad = numpy.random.random((102, 105)).astype("float32")
-        self.learning_rate = 0.1
-        self.param_out = self.param - self.learning_rate * self.grad
+        w = numpy.random.random((102, 105)).astype("float32")
+        g = numpy.random.random((102, 105)).astype("float32")
+        lr = 0.1
+
+        self.inputs = {'param': w, 'grad': g}
+        self.attrs = {'learning_rate': lr}
+        self.outputs = {'param_out': w - lr * g}
 
 
 if __name__ == "__main__":
......
@@ -8,8 +8,8 @@ class TestSigmoidOp(unittest.TestCase):
     def setUp(self):
         self.type = "sigmoid"
-        self.X = np.random.random((32, 100)).astype("float32")
-        self.Y = 1 / (1 + np.exp(-self.X))
+        self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+        self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}
 
 
 if __name__ == '__main__':
......
@@ -19,8 +19,10 @@ class TestSoftmaxOp(unittest.TestCase):
     def setUp(self):
         self.type = "softmax"
-        self.X = np.random.random((32, 100)).astype("float32")
-        self.Y = np.apply_along_axis(stable_softmax, 1, self.X)
+        self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+        self.outputs = {
+            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
+        }
 
 
 class TestSoftmaxGradOp(unittest.TestCase):
......
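The softmax test calls a stable_softmax helper defined earlier in the same
file, outside this hunk, so its body is not shown in the diff. A typical
numerically stable implementation — an assumption about the helper, not taken
from the commit — shifts by the max before exponentiating:

import numpy as np


def stable_softmax(x):
    # Subtracting the max makes every exponent <= 0, so np.exp cannot
    # overflow; the shift cancels between numerator and denominator,
    # leaving the softmax value unchanged.
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)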