From 2d807f2b4c586656b760e31030a08655f7d298b1 Mon Sep 17 00:00:00 2001
From: qijun
Date: Mon, 11 Sep 2017 15:31:33 +0800
Subject: [PATCH] init refine op python tests

---
 .../paddle/v2/framework/tests/CMakeLists.txt  |  2 -
 python/paddle/v2/framework/tests/op_test.py   | 27 ++++--
 .../v2/framework/tests/test_add_two_op.py     | 23 ++---
 .../v2/framework/tests/test_cos_sim_op.py     | 49 +++------
 .../framework/tests/test_cross_entropy_op.py  |  2 +-
 .../tests/test_fill_zeros_like_op.py          | 19 ++--
 .../v2/framework/tests/test_gather_op.py      | 32 +++---
 .../tests/test_gaussian_random_op.py          |  8 +-
 .../framework/tests/test_gradient_checker.py  | 42 ++++----
 .../v2/framework/tests/test_lookup_table.py   | 29 ++----
 .../paddle/v2/framework/tests/test_mean_op.py | 24 ++---
 .../v2/framework/tests/test_minus_op.py       | 23 ++---
 .../paddle/v2/framework/tests/test_mul_op.py  | 98 +++++-------------
 python/paddle/v2/framework/tests/test_net.py  |  2 +-
 .../v2/framework/tests/test_rowwise_add_op.py | 73 ++++++--------
 .../tests/test_scale_and_identity_op.py       | 41 +++-----
 .../v2/framework/tests/test_scatter_op.py     | 37 +++----
 .../paddle/v2/framework/tests/test_sgd_op.py  | 17 ++--
 .../v2/framework/tests/test_softmax_op.py     | 28 ++----
 .../tests/test_squared_l2_distance_op.py      | 76 ++++++--------
 .../paddle/v2/framework/tests/test_sum_op.py  | 12 +--
 .../v2/framework/tests/test_top_k_op.py       | 23 ++---
 .../framework/tests/test_uniform_random_op.py |  8 +-
 23 files changed, 272 insertions(+), 423 deletions(-)

diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt
index 2117fdf0d5..07997e201a 100644
--- a/python/paddle/v2/framework/tests/CMakeLists.txt
+++ b/python/paddle/v2/framework/tests/CMakeLists.txt
@@ -19,8 +19,6 @@ py_test(test_scatter_op SRCS test_scatter_op.py)
 py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py)
 py_test(test_top_k_op SRCS test_top_k_op.py)
 
-py_test(gradient_checker SRCS gradient_checker.py)
-
 py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
 
 py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py
index 3a6a5dca4c..fe094df8e5 100644
--- a/python/paddle/v2/framework/tests/op_test.py
+++ b/python/paddle/v2/framework/tests/op_test.py
@@ -9,7 +9,7 @@ def grad_var_name(var_name):
     return var_name + "@GRAD"
 
 
-def create_op(scope, op_type, inputs, outputs, attrs=None):
+def create_op(scope, op_type, inputs, outputs, attrs):
     kwargs = dict()
 
     for in_name, in_dup in Operator.get_op_inputs(op_type):
@@ -29,15 +29,16 @@ def create_op(scope, op_type, inputs, outputs, attrs=None):
         kwargs[out_name] = []
         if out_dup:
             sub_in = outputs[out_name]
-            for sun_in_name in sub_in:
-                var = scope.new_var(sun_in_name)
-                kwargs[out_name].append(sun_in_name)
+            for sub_in_name in sub_in:
+                var = scope.new_var(sub_in_name)
+                kwargs[out_name].append(sub_in_name)
         else:
             var = scope.new_var(out_name)
             kwargs[out_name].append(out_name)
 
     for attr_name in Operator.get_op_attr_names(op_type):
-        kwargs[attr_name] = attrs[attr_name]
+        if attr_name in attrs:
+            kwargs[attr_name] = attrs[attr_name]
 
     return Operator(op_type, **kwargs)
 
@@ -110,7 +111,7 @@ def get_numeric_gradient(scope,
     # we use a for loop to compute the gradient of every element.
     for i in xrange(tensor_size):
         if in_place:
-            set_input(op, inputs, core.CPUPlace())
+            set_input(scope, op, inputs, core.CPUPlace())
 
         # get one input element throw it's index i.
         origin = tensor_to_check.get_float_element(i)
@@ -120,7 +121,7 @@ def get_numeric_gradient(scope,
         y_pos = get_output()
 
         if in_place:
-            set_input(op, inputs, core.CPUPlace())
+            set_input(scope, op, inputs, core.CPUPlace())
 
         x_neg = origin - delta
         tensor_to_check.set_float_element(i, x_neg)
@@ -168,7 +169,11 @@ def get_gradient(scope, op, inputs, outputs, grad_name, place,
 class OpTest(unittest.TestCase):
     def check_output_with_place(self, place):
         self.scope = core.Scope()
-        self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
+        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
+        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
+        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
+        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
+                            op_attrs)
         if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
             return
         set_input(self.scope, self.op, self.inputs, place)
@@ -227,7 +232,11 @@ class OpTest(unittest.TestCase):
                    in_place=False,
                    max_relative_error=0.005):
         self.scope = core.Scope()
-        self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
+        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
+        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
+        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
+        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
+                            op_attrs)
         if no_grad_set is None:
             no_grad_set = set()
 
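Note: with op_test_util.OpTestMeta and gradient_checker.GradientChecker gone, every converted test now follows one pattern: setUp declares op_type, inputs, outputs and, where the operator takes attributes, attrs; the OpTest base class supplies the actual checks. A minimal sketch of the pattern (not part of the patch), modeled on the scale test further down:

    import unittest
    import numpy as np
    from op_test import OpTest


    class TestScaleOpSketch(OpTest):
        def setUp(self):
            self.op_type = "scale"  # operator under test
            self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
            self.attrs = {'scale': -2.3}
            self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}

        def test_check_output(self):
            self.check_output()  # runs the kernel, compares against self.outputs

        def test_check_grad(self):
            self.check_grad(['X'], 'Out')  # analytic vs. numeric gradient


    if __name__ == "__main__":
        unittest.main()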
diff --git a/python/paddle/v2/framework/tests/test_add_two_op.py b/python/paddle/v2/framework/tests/test_add_two_op.py
index a578e74eca..3ca34d9b9f 100644
--- a/python/paddle/v2/framework/tests/test_add_two_op.py
+++ b/python/paddle/v2/framework/tests/test_add_two_op.py
@@ -1,23 +1,20 @@
 import unittest
+import numpy as np
+from op_test import OpTest
 
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
-
-from op_test_util import OpTestMeta
-
-
-class TestAddOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
 
+class TestAddOp(OpTest):
     def setUp(self):
-        self.type = "add"
+        self.op_type = "add"
         self.inputs = {
-            'X': numpy.random.random((102, 105)).astype("float32"),
-            'Y': numpy.random.random((102, 105)).astype("float32")
+            'X': np.random.random((102, 105)).astype("float32"),
+            'Y': np.random.random((102, 105)).astype("float32")
         }
         self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
 
+    def test_check_output(self):
+        self.check_output()
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
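Note: the numeric reference that check_grad compares against comes from get_numeric_gradient in op_test.py, which perturbs one input element at a time and applies a central difference to a scalar reduction of the output. A simplified numpy-only sketch of what it computes (f stands in for running the op and summing its output):

    import numpy as np


    def numeric_grad(f, x, delta=0.005):
        # delta matches get_numeric_gradient's default above.
        grad = np.zeros_like(x)
        flat_x = x.reshape(-1)   # views into x and grad, so writes stick
        flat_g = grad.reshape(-1)
        for i in range(flat_x.size):
            origin = flat_x[i]
            flat_x[i] = origin + delta
            y_pos = f(x)
            flat_x[i] = origin - delta
            y_neg = f(x)
            flat_x[i] = origin   # restore before moving to the next element
            flat_g[i] = (y_pos - y_neg) / delta / 2
        return grad

    # Mirrors GetNumericGradientTest.test_add_op in test_gradient_checker.py:
    # d(sum(x + y))/dx_i == 1 everywhere.
    x = np.random.random((10, 1)).astype("float64")
    y = np.random.random((10, 1)).astype("float64")
    assert abs(numeric_grad(lambda v: (v + y).sum(), x).mean() - 1.0) < 1e-6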
diff --git a/python/paddle/v2/framework/tests/test_cos_sim_op.py b/python/paddle/v2/framework/tests/test_cos_sim_op.py
index 32013a7999..797cbd8cc5 100644
--- a/python/paddle/v2/framework/tests/test_cos_sim_op.py
+++ b/python/paddle/v2/framework/tests/test_cos_sim_op.py
@@ -1,17 +1,14 @@
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest
 
 
-class TestCosSimOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestCosSimOp(OpTest):
     def setUp(self):
-        self.type = "cos_sim"
+        self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((32, 64)).astype("float32"),
-            'Y': np.random.random((32, 64)).astype("float32")
+            'X': np.random.random((10, 5)).astype("float32"),
+            'Y': np.random.random((10, 5)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -23,38 +20,20 @@ class TestCosSimOp(unittest.TestCase):
             'Out': np.expand_dims(expect_out, 1)
         }
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestCosSimGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("cos_sim")
-        self.inputs = {
-            'X': np.random.random((10, 5)).astype("float32"),
-            'Y': np.random.random((10, 5)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.05)
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)
 
-    def test_ignore_x(self):
+    def test_check_grad_ignore_x(self):
         self.check_grad(
-            self.op,
-            self.inputs, ["Y"],
-            "Out",
-            max_relative_error=0.05,
-            no_grad_set={"X"})
+            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))
 
-    def test_ignore_y(self):
+    def test_check_grad_ignore_y(self):
         self.check_grad(
-            self.op,
-            self.inputs, ["X"],
-            "Out",
-            max_relative_error=0.05,
-            no_grad_set={"Y"})
+            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
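Note: the expected Out above is the row-wise cosine similarity, computed here via explicit intermediate norms. As a sketch, the same reference in one expression:

    import numpy as np

    X = np.random.random((10, 5)).astype("float32")
    Y = np.random.random((10, 5)).astype("float32")
    # cos_sim of row i: dot(X[i], Y[i]) / (|X[i]| * |Y[i]|)
    out = (X * Y).sum(axis=1) / (
        np.linalg.norm(X, axis=1) * np.linalg.norm(Y, axis=1))
    assert out.shape == (10, ) and (np.abs(out) <= 1.0 + 1e-6).all()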
diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py
index fb6a440e23..c2fc102a8b 100644
--- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py
+++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py
@@ -21,7 +21,7 @@ class TestCrossEntropy(OpTest):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Y")
+        self.check_grad(['X'], 'Y')
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py
index e5c862605f..2473daaba2 100644
--- a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py
+++ b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py
@@ -1,16 +1,17 @@
 import unittest
-from op_test_util import OpTestMeta
-import numpy
+import numpy as np
+from op_test import OpTest
 
 
-class TestFillZerosLikeOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestFillZerosLikeOp(OpTest):
     def setUp(self):
-        self.type = "fill_zeros_like"
-        self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")}
-        self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])}
+        self.op_type = "fill_zeros_like"
+        self.inputs = {'Src': np.random.random((219, 232)).astype("float32")}
+        self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])}
+
+    def test_check_output(self):
+        self.check_output()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_gather_op.py b/python/paddle/v2/framework/tests/test_gather_op.py
index e3de3fd0a1..b0ab429ef1 100644
--- a/python/paddle/v2/framework/tests/test_gather_op.py
+++ b/python/paddle/v2/framework/tests/test_gather_op.py
@@ -1,30 +1,20 @@
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import numpy as np
+from op_test import OpTest
 
 
-class TestGatherOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestGatherOp(OpTest):
     def setUp(self):
-        self.type = "gather"
-        xnp = numpy.random.random((10, 20)).astype("float32")
-        self.inputs = {
-            'X': xnp,
-            'Index': numpy.array([1, 3, 5]).astype("int32")
-        }
-        self.outputs = {'Out': self.inputs['X'][self.inputs['Index']]}
+        self.op_type = "gather"
+        xnp = np.random.random((10, 20)).astype("float32")
+        self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")}
+        self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestGatherGradOp(GradientChecker):
-    def test_gather_grad(self):
-        op = create_op("gather")
-        xnp = numpy.random.random((10, 20)).astype("float32")
-        inputs = {'X': xnp, 'Index': numpy.array([1, 3, 5]).astype("int32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/v2/framework/tests/test_gaussian_random_op.py b/python/paddle/v2/framework/tests/test_gaussian_random_op.py
index f95ed70b58..1f9e4db783 100644
--- a/python/paddle/v2/framework/tests/test_gaussian_random_op.py
+++ b/python/paddle/v2/framework/tests/test_gaussian_random_op.py
@@ -14,11 +14,11 @@ class GaussianRandomTest(unittest.TestCase):
 
     def gaussian_random_test(self, place):
         scope = core.Scope()
-        scope.new_var("Out").get_tensor()
+        scope.new_var('Out').get_tensor()
 
         op = Operator(
             "gaussian_random",
-            Out="Out",
+            Out='Out',
             dims=[1000, 784],
             mean=.0,
             std=1.,
@@ -27,10 +27,10 @@ class GaussianRandomTest(unittest.TestCase):
         op.infer_shape(scope)
         context = core.DeviceContext.create(place)
         op.run(scope, context)
-        tensor = numpy.array(scope.find_var("Out").get_tensor())
+        tensor = numpy.array(scope.find_var('Out').get_tensor())
         self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
         self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py
index e8a7f848df..abeb01cb34 100644
--- a/python/paddle/v2/framework/tests/test_gradient_checker.py
+++ b/python/paddle/v2/framework/tests/test_gradient_checker.py
@@ -1,42 +1,44 @@
 import unittest
-import numpy
-from paddle.v2.framework.op import Operator
-from gradient_checker import GradientChecker
-from gradient_checker import get_numeric_gradient
+import numpy as np
+import paddle.v2.framework.core as core
+from op_test import get_numeric_gradient
+from op_test import create_op
 
 
 class GetNumericGradientTest(unittest.TestCase):
     def test_add_op(self):
-        add_op = Operator("add", X="X", Y="Y", Out="Z")
-        x = numpy.random.random((10, 1)).astype("float32")
-        y = numpy.random.random((10, 1)).astype("float32")
-
-        arr = get_numeric_gradient(add_op, {"X": x, "Y": y}, "Z", "X")
+        x = np.random.random((10, 1)).astype("float32")
+        y = np.random.random((10, 1)).astype("float32")
+        z = x + y
+        scope = core.Scope()
+        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
+        arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out')
         self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)
 
     def test_softmax_op(self):
         def stable_softmax(x):
             """Compute the softmax of vector x in a numerically stable way."""
-            shiftx = x - numpy.max(x)
-            exps = numpy.exp(shiftx)
-            return exps / numpy.sum(exps)
+            shiftx = x - np.max(x)
+            exps = np.exp(shiftx)
+            return exps / np.sum(exps)
 
         def label_softmax_grad(Y, dY):
             dX = Y * 0.0
             for i in range(Y.shape[0]):
-                d = numpy.dot(Y[i, :], dY[i, :])
+                d = np.dot(Y[i, :], dY[i, :])
                 dX[i, :] = Y[i, :] * (dY[i, :] - d)
             return dX
 
-        softmax_op = Operator("softmax", X="X", Y="Y")
-
-        X = numpy.random.random((2, 2)).astype("float32")
-        Y = numpy.apply_along_axis(stable_softmax, 1, X)
-        dY = numpy.ones(Y.shape)
+        X = np.random.random((2, 2)).astype("float32")
+        Y = np.apply_along_axis(stable_softmax, 1, X)
+        dY = np.ones(Y.shape)
         dX = label_softmax_grad(Y, dY)
 
-        arr = get_numeric_gradient(softmax_op, {"X": X}, "Y", "X")
-        numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2)
+        scope = core.Scope()
+        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())
+
+        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
+        np.testing.assert_almost_equal(arr, dX, decimal=2)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/v2/framework/tests/test_lookup_table.py b/python/paddle/v2/framework/tests/test_lookup_table.py
index 4b7ce92c0f..b259bb67e8 100644
--- a/python/paddle/v2/framework/tests/test_lookup_table.py
+++ b/python/paddle/v2/framework/tests/test_lookup_table.py
@@ -1,31 +1,22 @@
 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest
 
 
-class TestLookupTableOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestLookupTableOp(OpTest):
     def setUp(self):
-        self.type = 'lookup_table'
-        table = np.random.random((17, 31)).astype('float32')
-        ids = np.random.randint(0, 17, 4).astype('int32')
+        self.op_type = "lookup_table"
+        table = np.random.random((17, 31)).astype("float32")
+        ids = np.random.randint(0, 17, 4).astype("int32")
         self.inputs = {'W': table, 'Ids': ids}
         self.outputs = {'Out': table[ids]}
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestLookupTableGradOp(GradientChecker):
-    def test_grad(self):
-        op = create_op('lookup_table')
-        table = np.random.random((17, 31)).astype('float32')
-        ids = np.random.randint(0, 17, 4).astype('int32')
-        inputs = {'W': table, 'Ids': ids}
-        # compare gradients
-        self.compare_grad(op, inputs, set(['Ids']))
-        # check gradients
-        self.check_grad(op, inputs, set('W'), 'Out')
+    def test_check_grad(self):
+        self.check_grad(['W'], 'Out', no_grad_set=set(['Ids']))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
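Note on the no_grad_set arguments used in these tests: Python's set() iterates its argument, so passing a bare string only works for single-character names. That is why the multi-character 'Ids' input above needs the list form:

    # set() splits a string into characters:
    print(set('X'))      # set(['X'])           -- fine for a one-letter name
    print(set('Ids'))    # set(['I', 'd', 's']) -- three bogus names
    print(set(['Ids']))  # set(['Ids'])         -- the intended singleton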
diff --git a/python/paddle/v2/framework/tests/test_mean_op.py b/python/paddle/v2/framework/tests/test_mean_op.py
index f32b3160d6..7823abd8f8 100644
--- a/python/paddle/v2/framework/tests/test_mean_op.py
+++ b/python/paddle/v2/framework/tests/test_mean_op.py
@@ -1,24 +1,20 @@
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
+from op_test import OpTest
 
 
-class TestMeanOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestMeanOp(OpTest):
     def setUp(self):
-        self.type = "mean"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
-        self.outputs = {'Out': np.mean(self.inputs['X'])}
+        self.op_type = "mean"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
+        self.outputs = {'Out': np.mean(self.inputs["X"])}
 
+    def test_check_output(self):
+        self.check_output()
 
-class MeanGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = create_op("mean")
-        inputs = {"X": np.random.random((10, 10)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_minus_op.py b/python/paddle/v2/framework/tests/test_minus_op.py
index 5abdd4a69b..dea797a1fe 100644
--- a/python/paddle/v2/framework/tests/test_minus_op.py
+++ b/python/paddle/v2/framework/tests/test_minus_op.py
@@ -1,30 +1,23 @@
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest
 
 
-class MinusOpTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class MinusOpTest(OpTest):
    def setUp(self):
-        self.type = "minus"
+        self.op_type = "minus"
         self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((32, 84)).astype("float32")
         }
         self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}
 
+    def test_check_output(self):
+        self.check_output()
 
-class MinusGradTest(GradientChecker):
-    def test_left(self):
-        op = create_op("minus")
-        inputs = {
-            "X": np.random.random((10, 10)).astype("float32"),
-            "Y": np.random.random((10, 10)).astype("float32")
-        }
-        self.check_grad(op, inputs, ["X", 'Y'], "Out")
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_mul_op.py b/python/paddle/v2/framework/tests/test_mul_op.py
index 8c827e242e..b3d95a56b8 100644
--- a/python/paddle/v2/framework/tests/test_mul_op.py
+++ b/python/paddle/v2/framework/tests/test_mul_op.py
@@ -1,27 +1,35 @@
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
-from paddle.v2.framework.op import Operator
+from op_test import OpTest
 
 
-class TestMulOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestMulOp(OpTest):
     def setUp(self):
-        self.type = "mul"
+        self.op_type = "mul"
         self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((84, 100)).astype("float32")
         }
         self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
 
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
 
-class TestMulOp2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+    def test_check_grad_ignore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))
 
+    def test_check_grad_ignore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))
+
+
+class TestMulOp2(OpTest):
     def setUp(self):
-        self.type = "mul"
+        self.op_type = "mul"
         self.inputs = {
             'X': np.random.random((15, 4, 12, 10)).astype("float32"),
             'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32")
@@ -32,72 +40,20 @@ class TestMulOp2(unittest.TestCase):
             self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9))
         }
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestMulGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("mul")
-        self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'Y': np.random.random((84, 100)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        # mul op will enlarge the relative error
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
-
-    def test_ignore_x(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["Y"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"X"})
-
-    def test_ignore_y(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["X"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"Y"})
-
-
-class TestMulGradTest2(GradientChecker):
-    def setUp(self):
-        self.op = Operator(
-            "mul", X="X", Y="Y", Out="Out", x_num_col_dims=2, y_num_col_dims=2)
-        self.inputs = {
-            "X": np.random.random((15, 4, 12, 10)).astype("float32"),
-            "Y": np.random.random((4, 30, 8, 2, 9)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
 
-    def test_ignore_x(self):
+    def test_check_grad_ignore_x(self):
         self.check_grad(
-            self.op,
-            self.inputs, ["Y"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"X"})
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))
 
-    def test_ignore_y(self):
+    def test_check_grad_ignore_y(self):
         self.check_grad(
-            self.op,
-            self.inputs, ["X"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"Y"})
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
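Note: the TestMulOp2 reference output encodes mul's num_col_dims semantics, which the removed TestMulGradTest2 spelled out as x_num_col_dims=2, y_num_col_dims=2: the first x_num_col_dims axes of X are flattened into matrix rows and the remaining axes into columns, likewise for Y, after which the op is an ordinary 2-D matmul. A sketch with the shapes used above:

    import numpy as np

    X = np.random.random((15, 4, 12, 10)).astype("float32")
    Y = np.random.random((4, 30, 8, 2, 9)).astype("float32")
    # X -> (15*4, 12*10) = (60, 120); Y -> (4*30, 8*2*9) = (120, 144)
    out = np.dot(X.reshape(15 * 4, 12 * 10), Y.reshape(4 * 30, 8 * 2 * 9))
    assert out.shape == (60, 144)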
diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/framework/tests/test_net.py
index e4b7cd480c..50cfb855f2 100644
--- a/python/paddle/v2/framework/tests/test_net.py
+++ b/python/paddle/v2/framework/tests/test_net.py
@@ -35,5 +35,5 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}
         self.assertEqual(expected, "\n" + str(net))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_rowwise_add_op.py b/python/paddle/v2/framework/tests/test_rowwise_add_op.py
index 8378c1cd21..336645bd99 100644
--- a/python/paddle/v2/framework/tests/test_rowwise_add_op.py
+++ b/python/paddle/v2/framework/tests/test_rowwise_add_op.py
@@ -1,68 +1,51 @@
 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest
 
 
-class TestRowwiseAddOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
-    def setUp(self):
-        self.type = "rowwise_add"
-        self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'b': np.random.random(84).astype("float32")
-        }
-        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
-
-
-class TestRowwiseAddOp2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestRowwiseAddOp(OpTest):
     def setUp(self):
-        self.type = "rowwise_add"
+        self.op_type = "rowwise_add"
         self.inputs = {
-            'X': np.random.random((13, 6, 7, 8)).astype("float32"),
-            'b': np.random.random((7, 8)).astype("float32")
+            'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
+            'b': np.random.uniform(0.1, 1, [10]).astype("float32")
         }
         self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestRowwiseAddGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("rowwise_add")
-        self.inputs = {
-            "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
-            "b": np.random.uniform(0.1, 1, [10]).astype("float32")
-        }
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'b'], 'Out')
 
-    def test_normal(self):
-        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
+    def test_check_grad_ignore_b(self):
+        self.check_grad(['X'], 'Out', no_grad_set=set('b'))
 
-    def test_ignore_b(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
+    def test_check_grad_ignore_x(self):
+        self.check_grad(['b'], 'Out', no_grad_set=set('X'))
 
-    def test_ignore_x(self):
-        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
 
-
-class TestRowwiseAddGradOp2(GradientChecker):
+class TestRowwiseAddOp2(OpTest):
     def setUp(self):
-        self.op = create_op("rowwise_add")
+        self.op_type = "rowwise_add"
         self.inputs = {
-            "X": np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
-            "b": np.random.uniform(0.1, 1, [2, 5]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
+            'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32")
         }
+        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
+
+    def test_check_output(self):
+        self.check_output()
 
-    def test_normal(self):
-        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'b'], 'Out')
 
-    def test_ignore_b(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
+    def test_check_grad_ignore_b(self):
+        self.check_grad(['X'], 'Out', no_grad_set=set('b'))
 
-    def test_ignore_x(self):
-        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
+    def test_check_grad_ignore_x(self):
+        self.check_grad(['b'], 'Out', no_grad_set=set('X'))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
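Note: the two rowwise_add fixtures pin down the broadcasting contract: b must match the trailing dimensions of X and is added to every leading slice, which is exactly what numpy broadcasting does in the expected outputs. A sketch with the shapes from TestRowwiseAddOp2:

    import numpy as np

    x = np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32")
    b = np.random.uniform(0.1, 1, [2, 5]).astype("float32")
    out = np.add(x, b)          # b broadcasts over the leading (2, 3) axes
    assert out.shape == (2, 3, 2, 5)
    assert np.allclose(out[1, 2], x[1, 2] + b)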
diff --git a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py b/python/paddle/v2/framework/tests/test_scale_and_identity_op.py
index 69b301c376..05d76d4282 100644
--- a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py
+++ b/python/paddle/v2/framework/tests/test_scale_and_identity_op.py
@@ -1,43 +1,34 @@
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
-from paddle.v2.framework.op import Operator
+from op_test import OpTest
 
 
-class IdentityTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class IdentityTest(OpTest):
     def setUp(self):
-        self.type = "identity"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.op_type = "identity"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
         self.outputs = {'Out': self.inputs['X']}
 
+    def test_check_output(self):
+        self.check_output()
 
-class IdentityGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = create_op("identity")
-        inputs = {"X": np.random.random((10, 10)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
-
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 
-class ScaleTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
 
+class ScaleTest(OpTest):
     def setUp(self):
-        self.type = "scale"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.op_type = "scale"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
         self.attrs = {'scale': -2.3}
         self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}
 
+    def test_check_output(self):
+        self.check_output()
 
-class ScaleGradTest(GradientChecker):
-    def test_normal(self):
-        op = Operator("scale", X="X", Out="Out", scale=3.2)
-        self.check_grad(op,
-                        {"X": np.random.random((10, 10)).astype("float32")},
-                        set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_scatter_op.py b/python/paddle/v2/framework/tests/test_scatter_op.py
index c1f9444889..33c73c5263 100644
--- a/python/paddle/v2/framework/tests/test_scatter_op.py
+++ b/python/paddle/v2/framework/tests/test_scatter_op.py
@@ -1,37 +1,24 @@
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import numpy as np
+from op_test import OpTest
 
 
-class TestScatterOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestScatterOp(OpTest):
     def setUp(self):
-        self.type = "scatter"
-        ref_np = numpy.ones((3, 3)).astype("float32")
-        index_np = numpy.array([1, 2]).astype("int32")
-        updates_np = numpy.random.random((2, 3)).astype("float32")
-        output_np = numpy.copy(ref_np)
+        self.op_type = "scatter"
+        ref_np = np.ones((3, 3)).astype("float32")
+        index_np = np.array([1, 2]).astype("int32")
+        updates_np = np.random.random((2, 3)).astype("float32")
+        output_np = np.copy(ref_np)
         output_np[index_np] += updates_np
         self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
         self.outputs = {'Out': output_np}
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestScatterGradOp(GradientChecker):
-    def test_scatter_grad(self):
-        op = create_op("scatter")
-        # test data setup
-        ref_np = numpy.ones((3, 10)).astype("float32")
-        index_np = numpy.array([1, 2]).astype("int32")
-        updates_np = numpy.random.random((2, 10)).astype("float32")
-        output_np = numpy.copy(ref_np)
-        output_np[index_np] += updates_np
-        inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
-        self.check_grad(
-            op, inputs, set(["Updates", "Ref"]), "Out", in_place=True)
+    def test_check_grad(self):
+        self.check_grad(['Updates', 'Ref'], 'Out', in_place=True)
 
 
 if __name__ == "__main__":
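Note: a caveat baked into the scatter reference above: output_np[index_np] += updates_np is numpy fancy indexing, which is buffered, so a duplicated index would be applied once rather than accumulated (np.add.at is the accumulating form). The test sidesteps this by using distinct indices:

    import numpy as np

    out = np.zeros(3)
    idx = np.array([1, 1, 2])
    out[idx] += 1.0
    print(out)            # [ 0.  1.  1.] -- the duplicate 1 did not accumulate
    np.add.at(out, idx, 1.0)
    print(out)            # [ 0.  3.  2.] -- add.at accumulates per occurrence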
diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py
index e5f9ef865e..557cf15ace 100644
--- a/python/paddle/v2/framework/tests/test_sgd_op.py
+++ b/python/paddle/v2/framework/tests/test_sgd_op.py
@@ -1,21 +1,22 @@
 import unittest
-import numpy
-from op_test_util import OpTestMeta
+import numpy as np
+from op_test import OpTest
 
 
-class TestSGD(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSGD(OpTest):
     def setUp(self):
-        self.type = "sgd"
-        w = numpy.random.random((102, 105)).astype("float32")
-        g = numpy.random.random((102, 105)).astype("float32")
+        self.op_type = "sgd"
+        w = np.random.random((102, 105)).astype("float32")
+        g = np.random.random((102, 105)).astype("float32")
         lr = 0.1
 
         self.inputs = {'param': w, 'grad': g}
         self.attrs = {'learning_rate': lr}
         self.outputs = {'param_out': w - lr * g}
 
+    def test_check_output(self):
+        self.check_output()
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_softmax_op.py b/python/paddle/v2/framework/tests/test_softmax_op.py
index 0d590fa706..1c5802dfd5 100644
--- a/python/paddle/v2/framework/tests/test_softmax_op.py
+++ b/python/paddle/v2/framework/tests/test_softmax_op.py
@@ -1,9 +1,6 @@
 import unittest
-
 import numpy as np
-
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest
 
 
 def stable_softmax(x):
@@ -13,26 +10,21 @@ def stable_softmax(x):
     return exps / np.sum(exps)
 
 
-class TestSoftmaxOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSoftmaxOp(OpTest):
     def setUp(self):
-        self.type = "softmax"
-        self.inputs = {"X": np.random.random((10, 10)).astype("float32")}
+        self.op_type = "softmax"
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
+        }
         self.outputs = {
-            "Y": np.apply_along_axis(stable_softmax, 1, self.inputs["X"])
+            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
         }
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestSoftmaxGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("softmax")
-        self.inputs = {
-            "X": np.random.uniform(0.1, 1, [10, 10]).astype("float32")
-        }
-
-    def test_softmax_grad(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Y")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y')
 
 
 if __name__ == "__main__":
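Note: stable_softmax shifts each row by its maximum before exponentiating; since exp(x - m) / sum(exp(x - m)) equals exp(x) / sum(exp(x)), the result is unchanged, but exp can no longer overflow on large inputs:

    import numpy as np

    def stable_softmax(x):
        shiftx = x - np.max(x)
        exps = np.exp(shiftx)
        return exps / np.sum(exps)

    x = np.array([1000., 1001., 1002.])   # naive np.exp(x) overflows to inf
    print(stable_softmax(x))              # [ 0.09003057  0.24472847  0.66524096]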
diff --git a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py
index 2bcdf37df4..dc6ebf5d30 100644
--- a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py
+++ b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py
@@ -1,17 +1,14 @@
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
+from op_test import OpTest
 
 
-class TestSquaredL2DistanceOp_f0(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSquaredL2DistanceOp_f0(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (32, 64)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32")
         }
         sub_res = self.inputs['X'] - self.inputs['Y']
         output = sub_res * sub_res
@@ -20,15 +17,19 @@ class TestSquaredL2DistanceOp_f0(unittest.TestCase):
             'Out': np.expand_dims(output.sum(1), 1)
         }
 
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')
 
-class TestSquaredL2DistanceOp_f1(unittest.TestCase):
-    __metaclass__ = OpTestMeta
 
+class TestSquaredL2DistanceOp_f1(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (1, 64)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32")
         }
         sub_res = self.inputs['X'] - self.inputs['Y']
         output = sub_res * sub_res
@@ -37,53 +38,34 @@ class TestSquaredL2DistanceOp_f1(unittest.TestCase):
             'Out': np.expand_dims(output.sum(1), 1)
         }
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestSquaredL2DistanceOp_f2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')
 
+
+class TestSquaredL2DistanceOp_f2(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64, 128)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (1, 64, 128)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32")
        }
         sub_res = self.inputs['X'] - self.inputs['Y']
-        sub_res = sub_res.reshape((32, 64 * 128))
+        sub_res = sub_res.reshape((2, 3 * 4))
         output = sub_res * sub_res
         self.outputs = {
             'sub_result': sub_res,
             'Out': np.expand_dims(output.sum(1), 1)
         }
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestSquaredL2DistanceGradOp(GradientChecker):
-    def test_squared_l2_distance_b0(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (2, 3)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")
-
-    def test_squared_l2_distance_b1(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (1, 3)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")
-
-    def test_squared_l2_distance_b2(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3, 4)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (1, 3, 4)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
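Note: the three squared_l2_distance fixtures vary only the broadcast (equal shapes, then a one-row Y) and the rank; in each case sub_result holds X - Y, flattened to two dimensions where needed, and Out is the per-row sum of its squares. A sketch with the f2 shapes:

    import numpy as np

    X = np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32")
    Y = np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32")
    sub_res = (X - Y).reshape((2, 3 * 4))                # 'sub_result'
    out = np.expand_dims((sub_res * sub_res).sum(1), 1)  # 'Out'
    assert out.shape == (2, 1)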
diff --git a/python/paddle/v2/framework/tests/test_sum_op.py b/python/paddle/v2/framework/tests/test_sum_op.py
index 66417d70e8..f8eb34c076 100644
--- a/python/paddle/v2/framework/tests/test_sum_op.py
+++ b/python/paddle/v2/framework/tests/test_sum_op.py
@@ -6,10 +6,10 @@ from op_test import OpTest
 class TestSumOp(OpTest):
     def setUp(self):
         self.op_type = "sum"
-        x0 = np.random.random((3, 4)).astype('float32')
-        x1 = np.random.random((3, 4)).astype('float32')
-        x2 = np.random.random((3, 4)).astype('float32')
-        self.inputs = {"X": {"x0": x0, "x1": x1, "x2": x2}}
+        x0 = np.random.random((3, 4)).astype("float32")
+        x1 = np.random.random((3, 4)).astype("float32")
+        x2 = np.random.random((3, 4)).astype("float32")
+        self.inputs = {'X': {'x0': x0, 'x1': x1, 'x2': x2}}
         y = x0 + x1 + x2
         self.outputs = {'Out': y}
 
@@ -17,8 +17,8 @@ class TestSumOp(OpTest):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(["x0"], "Out")
+        self.check_grad(['x0'], 'Out')
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_top_k_op.py b/python/paddle/v2/framework/tests/test_top_k_op.py
index e841d96d26..cab799256d 100644
--- a/python/paddle/v2/framework/tests/test_top_k_op.py
+++ b/python/paddle/v2/framework/tests/test_top_k_op.py
@@ -1,14 +1,11 @@
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest
 
 
-class TestTopkOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestTopkOp(OpTest):
     def setUp(self):
-        self.type = "top_k"
+        self.op_type = "top_k"
         k = 1
         input = np.random.random((32, 84)).astype("float32")
         output = np.ndarray((32, k))
@@ -25,11 +22,12 @@ class TestTopkOp(unittest.TestCase):
 
         self.outputs = {'Out': output, 'Indices': indices}
 
+    def test_check_output(self):
+        self.check_output()
+
 
-class TestTopkOp3d(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestTopkOp3d(OpTest):
     def setUp(self):
-        self.type = "top_k"
+        self.op_type = "top_k"
         k = 1
         input = np.random.random((32, 2, 84)).astype("float32")
         input_flat_2d = input.reshape(64, 84)
@@ -48,5 +46,8 @@ class TestTopkOp3d(unittest.TestCase):
 
         self.outputs = {'Out': output, 'Indices': indices}
 
+    def test_check_output(self):
+        self.check_output()
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
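Note: the gaussian_random test earlier and the uniform_random test below cannot compare against a fixed expected tensor, so they run the operator and assert on sample statistics instead. The tolerances are generous relative to the sampling error; e.g. for the uniform case on [-5, 10] with 1000 x 784 samples:

    import numpy as np

    # Uniform on [-5, 10]: mean 2.5, std 15/sqrt(12) ~= 4.33.  The standard
    # error of the mean over 784,000 samples is ~4.33/sqrt(784000) ~= 0.005,
    # so the delta=0.1 used in assertAlmostEqual is a very safe bound.
    sample = np.random.uniform(-5.0, 10.0, (1000, 784))
    assert abs(sample.mean() - 2.5) < 0.1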
diff --git a/python/paddle/v2/framework/tests/test_uniform_random_op.py b/python/paddle/v2/framework/tests/test_uniform_random_op.py
index c3d2bb44da..76a5e36e56 100644
--- a/python/paddle/v2/framework/tests/test_uniform_random_op.py
+++ b/python/paddle/v2/framework/tests/test_uniform_random_op.py
@@ -14,11 +14,11 @@ class UniformRandomTest(unittest.TestCase):
 
     def uniform_random_test(self, place):
         scope = core.Scope()
-        scope.new_var("X").get_tensor()
+        scope.new_var('X').get_tensor()
 
         op = Operator(
             "uniform_random",
-            Out="X",
+            Out='X',
             dims=[1000, 784],
             min=-5.0,
             max=10.0,
@@ -27,9 +27,9 @@ class UniformRandomTest(unittest.TestCase):
         op.infer_shape(scope)
         ctx = core.DeviceContext.create(place)
         op.run(scope, ctx)
-        tensor = numpy.array(scope.find_var("X").get_tensor())
+        tensor = numpy.array(scope.find_var('X').get_tensor())
         self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()