Commit 2d807f2b authored by qijun

init refine op python tests

Parent: c1696696
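This commit migrates the operator unit tests from the old `OpTestMeta`/`GradientChecker` helpers to a single `OpTest` base class provided by `op_test.py`. As a rough sketch of the new pattern (distilled from the diffs below; `my_op` and its tensors are hypothetical placeholders, and the `op_test` module is assumed to be importable from the test directory), a migrated test looks like:

import unittest
import numpy as np
from op_test import OpTest

class TestMyOp(OpTest):
    def setUp(self):
        self.op_type = "my_op"  # name of the operator under test
        # inputs/outputs are plain numpy arrays; OpTest feeds them into a scope
        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
        self.outputs = {'Out': self.inputs['X']}  # expected forward result

    def test_check_output(self):
        # runs the op and compares the result against self.outputs
        self.check_output()

    def test_check_grad(self):
        # compares the analytic gradient of 'Out' w.r.t. 'X' against a numeric one
        self.check_grad(['X'], 'Out')

if __name__ == "__main__":
    unittest.main()

Forward and gradient checks now live on the same test class, so each operator needs only one fixture instead of a separate `GradientChecker` subclass.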
@@ -19,8 +19,6 @@ py_test(test_scatter_op SRCS test_scatter_op.py)
 py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py)
 py_test(test_top_k_op SRCS test_top_k_op.py)
-py_test(gradient_checker SRCS gradient_checker.py)
 py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
 py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
...
@@ -9,7 +9,7 @@ def grad_var_name(var_name):
     return var_name + "@GRAD"

-def create_op(scope, op_type, inputs, outputs, attrs=None):
+def create_op(scope, op_type, inputs, outputs, attrs):
     kwargs = dict()
     for in_name, in_dup in Operator.get_op_inputs(op_type):
@@ -29,14 +29,15 @@ def create_op(scope, op_type, inputs, outputs, attrs=None):
             kwargs[out_name] = []
             if out_dup:
                 sub_in = outputs[out_name]
-                for sun_in_name in sub_in:
-                    var = scope.new_var(sun_in_name)
-                    kwargs[out_name].append(sun_in_name)
+                for sub_in_name in sub_in:
+                    var = scope.new_var(sub_in_name)
+                    kwargs[out_name].append(sub_in_name)
             else:
                 var = scope.new_var(out_name)
                 kwargs[out_name].append(out_name)

     for attr_name in Operator.get_op_attr_names(op_type):
-        kwargs[attr_name] = attrs[attr_name]
+        if attr_name in attrs:
+            kwargs[attr_name] = attrs[attr_name]
     return Operator(op_type, **kwargs)
@@ -89,6 +90,7 @@ def get_numeric_gradient(scope,
                          delta=0.005,
                          in_place=False):
+    print "before set input"
     set_input(scope, op, inputs, core.CPUPlace())
     op.infer_shape(scope)
@@ -110,7 +112,7 @@ def get_numeric_gradient(scope,
     # we use a for loop to compute the gradient of every element.
     for i in xrange(tensor_size):
         if in_place:
-            set_input(op, inputs, core.CPUPlace())
+            set_input(scope, op, inputs, core.CPUPlace())
         # get one input element throw it's index i.
         origin = tensor_to_check.get_float_element(i)
@@ -120,7 +122,7 @@ def get_numeric_gradient(scope,
         y_pos = get_output()

         if in_place:
-            set_input(op, inputs, core.CPUPlace())
+            set_input(scope, op, inputs, core.CPUPlace())
         x_neg = origin - delta
         tensor_to_check.set_float_element(i, x_neg)
@@ -168,7 +170,11 @@ def get_gradient(scope, op, inputs, outputs, grad_name, place,
 class OpTest(unittest.TestCase):
     def check_output_with_place(self, place):
         self.scope = core.Scope()
-        self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
+        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
+        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
+        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
+        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
+                            op_attrs)
         if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
             return
         set_input(self.scope, self.op, self.inputs, place)
@@ -227,7 +233,11 @@ class OpTest(unittest.TestCase):
                    in_place=False,
                    max_relative_error=0.005):
         self.scope = core.Scope()
-        self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs)
+        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
+        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
+        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
+        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
+                            op_attrs)
         if no_grad_set is None:
             no_grad_set = set()
...
 import unittest
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
-from op_test_util import OpTestMeta
+import numpy as np
+from op_test import OpTest

-class TestAddOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestAddOp(OpTest):
     def setUp(self):
-        self.type = "add"
+        self.op_type = "add"
         self.inputs = {
-            'X': numpy.random.random((102, 105)).astype("float32"),
-            'Y': numpy.random.random((102, 105)).astype("float32")
+            'X': np.random.random((102, 105)).astype("float32"),
+            'Y': np.random.random((102, 105)).astype("float32")
         }
         self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}

+    def test_check_output(self):
+        self.check_output()

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest

-class TestCosSimOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestCosSimOp(OpTest):
     def setUp(self):
-        self.type = "cos_sim"
+        self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((32, 64)).astype("float32"),
-            'Y': np.random.random((32, 64)).astype("float32")
+            'X': np.random.random((10, 5)).astype("float32"),
+            'Y': np.random.random((10, 5)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -23,38 +20,20 @@ class TestCosSimOp(unittest.TestCase):
             'Out': np.expand_dims(expect_out, 1)
         }

+    def test_check_output(self):
+        self.check_output()

-class TestCosSimGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("cos_sim")
-        self.inputs = {
-            'X': np.random.random((10, 5)).astype("float32"),
-            'Y': np.random.random((10, 5)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.05)
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)

-    def test_ignore_x(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["Y"],
-            "Out",
-            max_relative_error=0.05,
-            no_grad_set={"X"})
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))

-    def test_ignore_y(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["X"],
-            "Out",
-            max_relative_error=0.05,
-            no_grad_set={"Y"})
+    def test_check_grad_ignore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -21,7 +21,7 @@ class TestCrossEntropy(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(["X"], "Y")
+        self.check_grad(['X'], 'Y')

 if __name__ == "__main__":
...
 import unittest
-from op_test_util import OpTestMeta
-import numpy
+import numpy as np
+from op_test import OpTest

-class TestFillZerosLikeOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestFillZerosLikeOp(OpTest):
     def setUp(self):
-        self.type = "fill_zeros_like"
-        self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")}
-        self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])}
+        self.op_type = "fill_zeros_like"
+        self.inputs = {'Src': np.random.random((219, 232)).astype("float32")}
+        self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])}

+    def test_check_output(self):
+        self.check_output()

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import numpy as np
+from op_test import OpTest

-class TestGatherOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestGatherOp(OpTest):
     def setUp(self):
-        self.type = "gather"
-        xnp = numpy.random.random((10, 20)).astype("float32")
-        self.inputs = {
-            'X': xnp,
-            'Index': numpy.array([1, 3, 5]).astype("int32")
-        }
-        self.outputs = {'Out': self.inputs['X'][self.inputs['Index']]}
+        self.op_type = "gather"
+        xnp = np.random.random((10, 20)).astype("float32")
+        self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")}
+        self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}

+    def test_check_output(self):
+        self.check_output()

-class TestGatherGradOp(GradientChecker):
-    def test_gather_grad(self):
-        op = create_op("gather")
-        xnp = numpy.random.random((10, 20)).astype("float32")
-        inputs = {'X': xnp, 'Index': numpy.array([1, 3, 5]).astype("int32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')

 if __name__ == "__main__":
...
@@ -14,11 +14,11 @@ class GaussianRandomTest(unittest.TestCase):
     def gaussian_random_test(self, place):
         scope = core.Scope()
-        scope.new_var("Out").get_tensor()
+        scope.new_var('Out').get_tensor()

         op = Operator(
             "gaussian_random",
-            Out="Out",
+            Out='Out',
             dims=[1000, 784],
             mean=.0,
             std=1.,
@@ -27,10 +27,10 @@ class GaussianRandomTest(unittest.TestCase):
         op.infer_shape(scope)
         context = core.DeviceContext.create(place)
         op.run(scope, context)
-        tensor = numpy.array(scope.find_var("Out").get_tensor())
+        tensor = numpy.array(scope.find_var('Out').get_tensor())
         self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
         self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-import numpy
-from paddle.v2.framework.op import Operator
-from gradient_checker import GradientChecker
-from gradient_checker import get_numeric_gradient
+import numpy as np
+import paddle.v2.framework.core as core
+from op_test import get_numeric_gradient
+from op_test import create_op

 class GetNumericGradientTest(unittest.TestCase):
     def test_add_op(self):
-        add_op = Operator("add", X="X", Y="Y", Out="Z")
-        x = numpy.random.random((10, 1)).astype("float32")
-        y = numpy.random.random((10, 1)).astype("float32")
-
-        arr = get_numeric_gradient(add_op, {"X": x, "Y": y}, "Z", "X")
+        x = np.random.random((10, 1)).astype("float32")
+        y = np.random.random((10, 1)).astype("float32")
+        z = x + y
+        scope = core.Scope()
+        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
+        arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out')
         self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)

     def test_softmax_op(self):
         def stable_softmax(x):
             """Compute the softmax of vector x in a numerically stable way."""
-            shiftx = x - numpy.max(x)
-            exps = numpy.exp(shiftx)
-            return exps / numpy.sum(exps)
+            shiftx = x - np.max(x)
+            exps = np.exp(shiftx)
+            return exps / np.sum(exps)

         def label_softmax_grad(Y, dY):
             dX = Y * 0.0
             for i in range(Y.shape[0]):
-                d = numpy.dot(Y[i, :], dY[i, :])
+                d = np.dot(Y[i, :], dY[i, :])
                 dX[i, :] = Y[i, :] * (dY[i, :] - d)
             return dX

-        softmax_op = Operator("softmax", X="X", Y="Y")
-
-        X = numpy.random.random((2, 2)).astype("float32")
-        Y = numpy.apply_along_axis(stable_softmax, 1, X)
-        dY = numpy.ones(Y.shape)
+        X = np.random.random((2, 2)).astype("float32")
+        Y = np.apply_along_axis(stable_softmax, 1, X)
+        dY = np.ones(Y.shape)
         dX = label_softmax_grad(Y, dY)

-        arr = get_numeric_gradient(softmax_op, {"X": X}, "Y", "X")
-        numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2)
+        scope = core.Scope()
+        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())
+        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
+        np.testing.assert_almost_equal(arr, dX, decimal=1e-2)

 if __name__ == "__main__":
...
 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest

-class TestLookupTableOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestLookupTableOp(OpTest):
     def setUp(self):
-        self.type = 'lookup_table'
-        table = np.random.random((17, 31)).astype('float32')
-        ids = np.random.randint(0, 17, 4).astype('int32')
+        self.op_type = "lookup_table"
+        table = np.random.random((17, 31)).astype("float32")
+        ids = np.random.randint(0, 17, 4).astype("int32")
         self.inputs = {'W': table, 'Ids': ids}
         self.outputs = {'Out': table[ids]}

+    def test_check_output(self):
+        self.check_output()

-class TestLookupTableGradOp(GradientChecker):
-    def test_grad(self):
-        op = create_op('lookup_table')
-        table = np.random.random((17, 31)).astype('float32')
-        ids = np.random.randint(0, 17, 4).astype('int32')
-        inputs = {'W': table, 'Ids': ids}
-        # comapre gradients
-        self.compare_grad(op, inputs, set(['Ids']))
-        # check gradients
-        self.check_grad(op, inputs, set('W'), 'Out')
+    def test_check_grad(self):
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
+from op_test import OpTest

-class TestMeanOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestMeanOp(OpTest):
     def setUp(self):
-        self.type = "mean"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
-        self.outputs = {'Out': np.mean(self.inputs['X'])}
+        self.op_type = "mean"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
+        self.outputs = {'Out': np.mean(self.inputs["X"])}

+    def test_check_output(self):
+        self.check_output()

-class MeanGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = create_op("mean")
-        inputs = {"X": np.random.random((10, 10)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_checkout_grad(self):
+        self.check_grad(['X'], 'Out')

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest

-class MinusOpTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class MinusOpTest(OpTest):
     def setUp(self):
-        self.type = "minus"
+        self.op_type = "minus"
         self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((32, 84)).astype("float32")
         }
         self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}

+    def test_check_output(self):
+        self.check_output()

-class MinusGradTest(GradientChecker):
-    def test_left(self):
-        op = create_op("minus")
-        inputs = {
-            "X": np.random.random((10, 10)).astype("float32"),
-            "Y": np.random.random((10, 10)).astype("float32")
-        }
-        self.check_grad(op, inputs, ["X", 'Y'], "Out")
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
-from paddle.v2.framework.op import Operator
+from op_test import OpTest

-class TestMulOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestMulOp(OpTest):
     def setUp(self):
-        self.type = "mul"
+        self.op_type = "mul"
         self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((84, 100)).astype("float32")
         }
         self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))

-class TestMulOp2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestMulOp2(OpTest):
     def setUp(self):
-        self.type = "mul"
+        self.op_type = "mul"
         self.inputs = {
             'X': np.random.random((15, 4, 12, 10)).astype("float32"),
             'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32")
@@ -32,72 +40,20 @@ class TestMulOp2(unittest.TestCase):
             self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9))
         }

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)

-class TestMulGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("mul")
-        self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'Y': np.random.random((84, 100)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        # mul op will enlarge the relative error
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
-
-    def test_ignore_x(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["Y"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"X"})
-
-    def test_ignore_y(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["X"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"Y"})
-
-class TestMulGradTest2(GradientChecker):
-    def setUp(self):
-        self.op = Operator(
-            "mul", X="X", Y="Y", Out="Out", x_num_col_dims=2, y_num_col_dims=2)
-        self.inputs = {
-            "X": np.random.random((15, 4, 12, 10)).astype("float32"),
-            "Y": np.random.random((4, 30, 8, 2, 9)).astype("float32")
-        }
-
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-
-    def test_normal(self):
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
-
-    def test_ignore_x(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["Y"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"X"})
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))

-    def test_ignore_y(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["X"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"Y"})
+    def test_check_grad_ignore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -35,5 +35,5 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}
         self.assertEqual(expected, "\n" + str(net))

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest

-class TestRowwiseAddOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
-    def setUp(self):
-        self.type = "rowwise_add"
-        self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'b': np.random.random(84).astype("float32")
-        }
-        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
-
-class TestRowwiseAddOp2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestRowwiseAddOp(OpTest):
     def setUp(self):
-        self.type = "rowwise_add"
+        self.op_type = "rowwise_add"
         self.inputs = {
-            'X': np.random.random((13, 6, 7, 8)).astype("float32"),
-            'b': np.random.random((7, 8)).astype("float32")
+            'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
+            'b': np.random.uniform(0.1, 1, [10]).astype("float32")
         }
         self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}

+    def test_check_output(self):
+        self.check_output()

-class TestRowwiseAddGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("rowwise_add")
-        self.inputs = {
-            "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
-            "b": np.random.uniform(0.1, 1, [10]).astype("float32")
-        }
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'b'], 'Out')

-    def test_normal(self):
-        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
+    def test_check_grad_ingore_b(self):
+        self.check_grad(['X'], 'Out', no_grad_set=set('b'))

-    def test_ignore_b(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
+    def test_check_grad_ingore_x(self):
+        self.check_grad(['b'], 'Out', no_grad_set=set('X'))

-    def test_ignore_x(self):
-        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})

-class TestRowwiseAddGradOp2(GradientChecker):
+class TestRowwiseAddOp2(OpTest):
     def setUp(self):
-        self.op = create_op("rowwise_add")
+        self.op_type = "rowwise_add"
         self.inputs = {
-            "X": np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
-            "b": np.random.uniform(0.1, 1, [2, 5]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
+            'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32")
         }
+        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}

+    def test_check_output(self):
+        self.check_output()

-    def test_normal(self):
-        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'b'], 'Out')

-    def test_ignore_b(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
+    def test_check_grad_ignore_b(self):
+        self.check_grad(['X'], 'Out', no_grad_set=set('b'))

-    def test_ignore_x(self):
-        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
+    def test_check_grad_ignore_x(self):
+        self.check_grad(['b'], 'Out', no_grad_set=set('X'))

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
-from paddle.v2.framework.op import Operator
+from op_test import OpTest

-class IdentityTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class IdentityTest(OpTest):
     def setUp(self):
-        self.type = "identity"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.op_type = "identity"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
         self.outputs = {'Out': self.inputs['X']}

+    def test_check_output(self):
+        self.check_output()

-class IdentityGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = create_op("identity")
-        inputs = {"X": np.random.random((10, 10)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')

-class ScaleTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class ScaleTest(OpTest):
     def setUp(self):
-        self.type = "scale"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.op_type = "scale"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
         self.attrs = {'scale': -2.3}
         self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}

+    def test_check_output(self):
+        self.check_output()

-class ScaleGradTest(GradientChecker):
-    def test_normal(self):
-        op = Operator("scale", X="X", Out="Out", scale=3.2)
-        self.check_grad(op,
-                        {"X": np.random.random((10, 10)).astype("float32")},
-                        set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import numpy as np
+from op_test import OpTest

-class TestScatterOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestScatterOp(OpTest):
     def setUp(self):
-        self.type = "scatter"
-        ref_np = numpy.ones((3, 3)).astype("float32")
-        index_np = numpy.array([1, 2]).astype("int32")
-        updates_np = numpy.random.random((2, 3)).astype("float32")
-        output_np = numpy.copy(ref_np)
+        self.op_type = "scatter"
+        ref_np = np.ones((3, 3)).astype("float32")
+        index_np = np.array([1, 2]).astype("int32")
+        updates_np = np.random.random((2, 3)).astype("float32")
+        output_np = np.copy(ref_np)
         output_np[index_np] += updates_np
         self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
         self.outputs = {'Out': output_np}

+    def test_check_output(self):
+        self.check_output()

-class TestScatterGradOp(GradientChecker):
-    def test_scatter_grad(self):
-        op = create_op("scatter")
-        # test data setup
-        ref_np = numpy.ones((3, 10)).astype("float32")
-        index_np = numpy.array([1, 2]).astype("int32")
-        updates_np = numpy.random.random((2, 10)).astype("float32")
-        output_np = numpy.copy(ref_np)
-        output_np[index_np] += updates_np
-        inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
-        self.check_grad(
-            op, inputs, set(["Updates", "Ref"]), "Out", in_place=True)
+    def test_check_grad(self):
+        self.check_grad(['Updates', 'Ref'], 'Out', in_place=True)

 if __name__ == "__main__":
...
 import unittest
-import numpy
-from op_test_util import OpTestMeta
+import numpy as np
+from op_test import OpTest

-class TestSGD(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSGD(OpTest):
     def setUp(self):
-        self.type = "sgd"
-        w = numpy.random.random((102, 105)).astype("float32")
-        g = numpy.random.random((102, 105)).astype("float32")
+        self.op_type = "sgd"
+        w = np.random.random((102, 105)).astype("float32")
+        g = np.random.random((102, 105)).astype("float32")
         lr = 0.1
         self.inputs = {'param': w, 'grad': g}
         self.attrs = {'learning_rate': lr}
         self.outputs = {'param_out': w - lr * g}

+    def test_check_output(self):
+        self.check_output()

 if __name__ == "__main__":
     unittest.main()
 import unittest
 import numpy as np
+from op_test import OpTest
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta

 def stable_softmax(x):
@@ -13,26 +10,21 @@ def stable_softmax(x):
     return exps / np.sum(exps)

-class TestSoftmaxOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSoftmaxOp(OpTest):
     def setUp(self):
         self.type = "softmax"
-        self.inputs = {"X": np.random.random((10, 10)).astype("float32")}
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
+        }
         self.outputs = {
-            "Y": np.apply_along_axis(stable_softmax, 1, self.inputs["X"])
+            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
         }

+    def test_check_output(self):
+        self.check_output()

-class TestSoftmaxGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("softmax")
-        self.inputs = {
-            "X": np.random.uniform(0.1, 1, [10, 10]).astype("float32")
-        }
-
-    def test_softmax_grad(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Y")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y')

 if __name__ == "__main__":
...
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
+from op_test import OpTest

-class TestSquaredL2DistanceOp_f0(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSquaredL2DistanceOp_f0(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (32, 64)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32")
         }
         sub_res = self.inputs['X'] - self.inputs['Y']
         output = sub_res * sub_res
@@ -20,15 +17,19 @@ class TestSquaredL2DistanceOp_f0(unittest.TestCase):
             'Out': np.expand_dims(output.sum(1), 1)
         }

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')

-class TestSquaredL2DistanceOp_f1(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSquaredL2DistanceOp_f1(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (1, 64)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32")
         }
         sub_res = self.inputs['X'] - self.inputs['Y']
         output = sub_res * sub_res
@@ -37,53 +38,34 @@ class TestSquaredL2DistanceOp_f1(unittest.TestCase):
             'Out': np.expand_dims(output.sum(1), 1)
         }

+    def test_check_output(self):
+        self.check_output()

-class TestSquaredL2DistanceOp_f2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')

+class TestSquaredL2DistanceOp_f2(OpTest):
     def setUp(self):
-        self.type = 'squared_l2_distance'
+        self.op_type = "squared_l2_distance"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1., (32, 64, 128)).astype('float32'),
-            'Y': np.random.uniform(0.1, 1., (1, 64, 128)).astype('float32')
+            'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"),
+            'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32")
         }
         sub_res = self.inputs['X'] - self.inputs['Y']
-        sub_res = sub_res.reshape((32, 64 * 128))
+        sub_res = sub_res.reshape((2, 3 * 4))
         output = sub_res * sub_res
         self.outputs = {
             'sub_result': sub_res,
             'Out': np.expand_dims(output.sum(1), 1)
         }

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')

-class TestSquaredL2DistanceGradOp(GradientChecker):
-    def test_squared_l2_distance_b0(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (2, 3)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")
-
-    def test_squared_l2_distance_b1(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (1, 3)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")
-
-    def test_squared_l2_distance_b2(self):
-        op = create_op("squared_l2_distance")
-        inputs = {
-            'X': np.random.uniform(0.1, .6, (2, 3, 4)).astype('float32'),
-            'Y': np.random.uniform(0.1, .6, (1, 3, 4)).astype('float32')
-        }
-        self.compare_grad(op, inputs)
-        self.check_grad(op, inputs, set(["X", "Y"]), "Out")

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -6,10 +6,10 @@ from op_test import OpTest
 class TestSumOp(OpTest):
     def setUp(self):
         self.op_type = "sum"
-        x0 = np.random.random((3, 4)).astype('float32')
-        x1 = np.random.random((3, 4)).astype('float32')
-        x2 = np.random.random((3, 4)).astype('float32')
-        self.inputs = {"X": {"x0": x0, "x1": x1, "x2": x2}}
+        x0 = np.random.random((3, 4)).astype("float32")
+        x1 = np.random.random((3, 4)).astype("float32")
+        x2 = np.random.random((3, 4)).astype("float32")
+        self.inputs = {'X': {'x0': x0, 'x1': x1, 'x2': x2}}
         y = x0 + x1 + x2
         self.outputs = {'Out': y}
@@ -17,8 +17,8 @@ class TestSumOp(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(["x0"], "Out")
+        self.check_grad(['x0'], 'Out')

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest

-class TestTopkOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestTopkOp(OpTest):
     def setUp(self):
-        self.type = "top_k"
+        self.op_type = "top_k"
         k = 1
         input = np.random.random((32, 84)).astype("float32")
         output = np.ndarray((32, k))
@@ -25,11 +22,9 @@ class TestTopkOp(unittest.TestCase):
         self.outputs = {'Out': output, 'Indices': indices}

-class TestTopkOp3d(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestTopkOp3d(OpTest):
     def setUp(self):
-        self.type = "top_k"
+        self.op_type = "top_k"
         k = 1
         input = np.random.random((32, 2, 84)).astype("float32")
         input_flat_2d = input.reshape(64, 84)
@@ -48,5 +43,5 @@ class TestTopkOp3d(unittest.TestCase):
         self.outputs = {'Out': output, 'Indices': indices}

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -14,11 +14,11 @@ class UniformRandomTest(unittest.TestCase):
     def uniform_random_test(self, place):
         scope = core.Scope()
-        scope.new_var("X").get_tensor()
+        scope.new_var('X').get_tensor()

         op = Operator(
             "uniform_random",
-            Out="X",
+            Out='X',
             dims=[1000, 784],
             min=-5.0,
             max=10.0,
@@ -27,9 +27,9 @@ class UniformRandomTest(unittest.TestCase):
         op.infer_shape(scope)
         ctx = core.DeviceContext.create(place)
         op.run(scope, ctx)
-        tensor = numpy.array(scope.find_var("X").get_tensor())
+        tensor = numpy.array(scope.find_var('X').get_tensor())
         self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()