Commit d71190f0 authored by Yu Yang, committed by GitHub

Merge pull request #4022 from QiJune/refine_op_py_tests

refine all operator python tests using new test framework
...@@ -19,8 +19,6 @@ py_test(test_scatter_op SRCS test_scatter_op.py)
py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py)
py_test(test_top_k_op SRCS test_top_k_op.py)
py_test(gradient_checker SRCS gradient_checker.py)
py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
......
import unittest
import numpy
import itertools
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
__all__ = ['get_numeric_gradient']
def create_op(op_type):
# TODO need to set attrs
kwargs = dict()
for in_name in Operator.get_op_input_names(op_type):
kwargs[in_name] = in_name
for out_name in Operator.get_op_output_names(op_type):
kwargs[out_name] = out_name
return Operator(op_type, **kwargs)
def grad_var_name(var_name):
return var_name + "@GRAD"
def empty_var_name():
return "@EMPTY@"
def get_numeric_gradient(op,
input_values,
output_name,
input_to_check,
delta=0.005,
local_scope=None,
in_place=False):
"""
Get Numeric Gradient for an operator's input.
    :param op: C++ operator instance; it can also be a network.
    :param input_values: The input variables. Should be a dictionary whose
    keys are variable names and whose values are numpy arrays.
    :param output_name: The final output variable name.
    :param input_to_check: The input variable whose gradient will be computed.
    :param delta: The perturbation value for the numeric gradient method. The
    smaller delta is, the more accurate the result, but a delta that is too
    small can cause numerical stability problems.
    :param local_scope: The local scope used by get_numeric_gradient.
    :param in_place: Whether the operator modifies its inputs in place; if
    True, the inputs are restored before each perturbation.
:return: The gradient array in numpy format.
"""
if local_scope is None:
local_scope = core.Scope()
    # Create all input variables in local_scope
for var_name in input_values:
var = local_scope.new_var(var_name)
tensor = var.get_tensor()
tensor.set_dims(input_values[var_name].shape)
tensor.alloc_float(core.CPUPlace())
tensor.set(input_values[var_name], core.CPUPlace())
    # Create all output variables in local_scope
opts = op.outputs()
for key in opts:
for output in opts[key]:
if local_scope.find_var(output) is None:
local_scope.new_var(output).get_tensor()
op.infer_shape(local_scope)
# allocate output memory
for key in opts:
for output in opts[key]:
local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace(
))
cpu_ctx = core.DeviceContext.create(core.CPUPlace())
def get_output():
op.run(local_scope, cpu_ctx)
return numpy.array(local_scope.find_var(output_name).get_tensor()).sum()
def product(dim):
return reduce(lambda a, b: a * b, dim, 1)
def restore_inputs():
for var_name in input_values:
tensor_ = local_scope.find_var(var_name).get_tensor()
tensor_.set(numpy.copy(input_values[var_name]), core.CPUPlace())
    # get the input tensor whose numeric gradient we want to compute.
tensor_to_check = local_scope.find_var(input_to_check).get_tensor()
tensor_size = product(tensor_to_check.get_dims())
# prepare a numpy array to store the gradient.
gradient_flat = numpy.zeros(shape=(tensor_size, ), dtype='float32')
    # compute the gradient of one element at a time; a for loop iterates
    # over every element.
for i in xrange(tensor_size):
if in_place:
restore_inputs()
        # get one input element by its index i.
origin = tensor_to_check.get_float_element(i)
# add delta to it, run op and then get the sum of the result tensor.
x_pos = origin + delta
tensor_to_check.set_float_element(i, x_pos)
y_pos = get_output()
        # subtract delta from this element, run the op, and get the sum of the result tensor.
if in_place:
restore_inputs()
x_neg = origin - delta
tensor_to_check.set_float_element(i, x_neg)
y_neg = get_output()
# restore old value
tensor_to_check.set_float_element(i, origin)
# compute the gradient of this element and store it into a numpy array.
gradient_flat[i] = (y_pos - y_neg) / delta / 2
# reshape the gradient result to the shape of the source tensor.
return gradient_flat.reshape(tensor_to_check.get_dims())
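
# Illustrative sketch (not part of the original file): the loop above is a
# central-difference estimate, grad_i ~= (f(x_i + delta) - f(x_i - delta)) / (2 * delta).
# The same idea for a plain numpy function f(x) -> scalar, without the
# scope/tensor machinery, looks roughly like this (uses the numpy module
# imported at the top of this file; assumes x is a contiguous array so that
# reshape(-1) returns a view):
def _numeric_gradient_sketch(f, x, delta=0.005):
    grad = numpy.zeros_like(x).reshape(-1)
    flat_x = x.reshape(-1)  # a view into x, so the writes below are seen by f(x)
    for i in xrange(flat_x.size):
        origin = flat_x[i]
        flat_x[i] = origin + delta
        y_pos = f(x)  # f at x with x_i increased by delta
        flat_x[i] = origin - delta
        y_neg = f(x)  # f at x with x_i decreased by delta
        flat_x[i] = origin  # restore the original value
        grad[i] = (y_pos - y_neg) / delta / 2  # central difference
    return grad.reshape(x.shape)
    # e.g. for f(x) = (x ** 2).sum() this returns approximately 2 * x
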
class GradientChecker(unittest.TestCase):
def __get_gradient(self, forward_op, backward_op, input_value, grad_names,
place):
"""Get the input gradients after running forward and backward operators
on the given places.
:param forward_op: forward operator
:type forward_op: Operator
:param backward_op: backward operator
:type backward_op: Operator
:param input_value: input values.
:type input_value: dict{string:numpy.array}
:param grad_names: the names of returned input gradients.
        :type grad_names: a list of string
:param place: the device type.
:type place: CPUPlace or GPUPlace
        :return: the input gradients of the given grad_names.
:rtype: a list of numpy.array
"""
scope = core.Scope()
ctx = core.DeviceContext.create(place)
inputs = forward_op.inputs()
in_names = [item for k in inputs for item in inputs[k]]
outputs = forward_op.outputs()
out_names = [item for k in outputs for item in outputs[k]]
# create input var and set value
for name, value in input_value.iteritems():
if name not in in_names:
                raise ValueError(name + " does not exist in Op's inputs.")
var = scope.new_var(name).get_tensor()
var.set_dims(value.shape)
var.set(value, place)
# run forward op
for out_name in out_names:
scope.new_var(out_name)
forward_op.infer_shape(scope)
forward_op.run(scope, ctx)
        # create output gradient variables, set their shapes from the
        # forward outputs, and fill them with ones
for name in out_names:
out_tensor = scope.find_var(name).get_tensor()
grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
grad_tensor.set_dims(out_tensor.shape())
data = numpy.ones(out_tensor.shape(), dtype=numpy.float32)
grad_tensor.set(data, place)
# run backward op
backward_outs = backward_op.outputs()
backward_names = [
item for key in backward_outs for item in backward_outs[key]
]
for name in backward_names:
scope.new_var(name)
backward_op.infer_shape(scope)
backward_op.run(scope, ctx)
outs = [
numpy.array(scope.find_var(name).get_tensor())
for name in grad_names
]
return outs
def compare_grad(self, forward_op, input_value, no_grad_set=None):
""" Compare the input gradients between CPU and GPU for the given forward
operator.
:param forward_op: forward operator
:type forward_op: Operator
:param input_value: input values.
:type input_value: dict{string:numpy.array}
        :param no_grad_set: the set of variable names that have no gradients.
        :type no_grad_set: a set of string
        :raises AssertionError: if the CPU and GPU gradients differ.
"""
if no_grad_set is None:
no_grad_set = set()
backward_op = core.Operator.backward(forward_op, no_grad_set)
        # return if not compiled with GPU or if no GPU kernel is implemented
if not (core.is_compile_gpu() and backward_op.support_gpu()):
return
outputs = backward_op.outputs()
out_names = [item for k in outputs for item in outputs[k]]
out_names = filter(lambda x: x != empty_var_name(), out_names)
cpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
out_names, core.CPUPlace())
gpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
out_names, core.GPUPlace(0))
for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads,
out_names):
self.assertTrue(
numpy.allclose(
c_grad, g_grad, atol=1e-4),
"output name: " + name + " has diff")
def __assert_is_close(self, numeric_grads, analytic_grads, names,
max_relative_error, msg_prefix):
"""Use relative error for the comparison.
        :param numeric_grads: the numerical gradients.
        :type numeric_grads: a list of numpy.array
        :param analytic_grads: the analytical gradients.
        :type analytic_grads: a list of numpy.array
        :param names: the names of the gradients, printed for debugging.
        :type names: a list of string
        :param msg_prefix: extra string info, printed for debugging.
        :type msg_prefix: string
"""
for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
abs_a = numpy.abs(a)
# if abs_a is nearly zero, then use abs error for a, not relative
# error.
abs_a[abs_a < 1e-3] = 1
diff_mat = numpy.abs(a - b) / abs_a
max_diff = numpy.max(diff_mat)
def err_msg():
offset = numpy.argmax(diff_mat > max_relative_error)
return "%s Variable %s max gradient diff %f over limit %f, the first " \
"error element is %d" % (
msg_prefix, name, max_diff, max_relative_error, offset)
self.assertLessEqual(max_diff, max_relative_error, err_msg())
def check_grad(self,
forward_op,
input_vars,
inputs_to_check,
output_name,
no_grad_set=None,
only_cpu=False,
in_place=False,
max_relative_error=0.005):
"""
        :param forward_op: the forward operator, used to create backward_op.
        :param input_vars: numpy values of the input variables; the following
        computation uses these values.
        :param inputs_to_check: names of the input variables whose gradients
        should be checked.
        :param output_name: the output variable name of the forward network.
        :param max_relative_error: the relative tolerance parameter.
        :param no_grad_set: variable names to exclude when creating the
        backward op.
        :param only_cpu: only compute and check gradients on the CPU kernel.
:return:
"""
if no_grad_set is None:
no_grad_set = set()
no_tmp_out = forward_op.no_intermediate_outputs()
        if len(no_tmp_out) != 1:
            raise ValueError("The number of non-intermediate outputs should be 1.")
inputs = forward_op.inputs()
in_names = [item for k in inputs for item in inputs[k]]
for no_grad in no_grad_set:
if no_grad not in in_names:
raise ValueError("no_grad should be in in_names")
if no_grad in inputs_to_check:
raise ValueError("no_grad should not be in inputs_to_check")
backward_op = core.Operator.backward(forward_op, no_grad_set)
places = [core.CPUPlace()]
if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu():
places.append(core.GPUPlace(0))
# get numerical gradients
numeric_grads = [
get_numeric_gradient(
forward_op, input_vars, output_name, name, in_place=in_place)
for name in inputs_to_check
]
check_names = [grad_var_name(name) for name in inputs_to_check]
for place in places:
analytic_grads = self.__get_gradient(forward_op, backward_op,
input_vars, check_names, place)
self.__assert_is_close(numeric_grads, analytic_grads, check_names,
max_relative_error,
"Gradient Check On %s" % str(place))
...@@ -65,7 +65,7 @@ def set_output_grad(scope, op, outputs, place):
        if out_name in outputs:
            if out_dup:
                sub_out = outputs[out_name]
                for sub_out_name, _ in sub_out:
                    out_tensor = scope.find_var(sub_out_name).get_tensor()
                    grad_tensor = scope.new_var(grad_var_name(
                        sub_out_name)).get_tensor()
......
import numpy
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
class OpTestMeta(type):
"""
    Operator Test ClassMeta.
    It injects a `test_all` method into the user's operator test class so that
    the Python unittest module will run it.
    `test_all` reads the values stored in `self`, uses them to create and run
    an operator, and checks whether that op's outputs are correct.
    See `test_add_two_op` for example usage.
"""
def __new__(cls, name, bases, attrs):
obj = super(OpTestMeta, cls).__new__(cls, name, bases, attrs)
def test_all(self):
scope = core.Scope()
kwargs = dict()
places = [core.CPUPlace()]
if core.is_compile_gpu():
places.append(core.GPUPlace(0))
for place in places:
for in_name, in_dup in Operator.get_op_inputs(self.type):
if hasattr(self, 'inputs') and in_name in self.inputs:
kwargs[in_name] = []
if in_dup:
arrays = self.inputs[in_name]
for index, arr in enumerate(arrays):
var = scope.new_var(in_name + str(index))
tensor = var.get_tensor()
tensor.set_dims(arr.shape)
tensor.set(arr, place)
kwargs[in_name].append(in_name + str(index))
else:
kwargs[in_name] = in_name
var = scope.new_var(in_name).get_tensor()
arr = self.inputs[in_name]
var.set_dims(arr.shape)
var.set(arr, place)
else:
kwargs[in_name] = "@EMPTY@"
for out_name, out_dup in Operator.get_op_outputs(self.type):
if not hasattr(self, "outputs"):
raise ValueError(
"The test op must set self.outputs dict.")
if out_name not in self.outputs:
raise ValueError("The %s is not in self.outputs dict." %
(out_name))
kwargs[out_name] = out_name
scope.new_var(out_name).get_tensor()
for attr_name in Operator.get_op_attr_names(self.type):
if hasattr(self, "attrs") and attr_name in self.attrs:
kwargs[attr_name] = self.attrs[attr_name]
op = Operator(self.type, **kwargs)
if isinstance(place, core.GPUPlace) and not op.support_gpu():
return
op.infer_shape(scope)
ctx = core.DeviceContext.create(place)
op.run(scope, ctx)
for out_name, out_dup in Operator.get_op_outputs(self.type):
actual = numpy.array(scope.find_var(out_name).get_tensor())
expect = self.outputs[out_name]
self.assertTrue(
numpy.allclose(
actual, expect, atol=1e-05),
"output name: " + out_name + " has diff")
obj.test_all = test_all
return obj
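As the docstring notes, a test class only has to set `__metaclass__ = OpTestMeta` and fill in `self.type`, `self.inputs`, and `self.outputs` (plus `self.attrs` if the op has attributes); the injected `test_all` builds the operator from those fields, runs it on every available place, and compares the outputs with `numpy.allclose`. A minimal sketch, modeled on the old-style `test_add_two_op` that this commit migrates:

import unittest
import numpy as np
from op_test_util import OpTestMeta


class ExampleAddOpTest(unittest.TestCase):
    # Illustrative only; the real tests below are migrated to the new OpTest.
    __metaclass__ = OpTestMeta

    def setUp(self):
        self.type = "add"
        self.inputs = {
            'X': np.random.random((4, 5)).astype("float32"),
            'Y': np.random.random((4, 5)).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}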
import unittest
import numpy as np
from op_test import OpTest


class TestAddOp(OpTest):
    def setUp(self):
        self.op_type = "add"
        self.inputs = {
            'X': np.random.random((102, 105)).astype("float32"),
            'Y': np.random.random((102, 105)).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}

    def test_check_output(self):
        self.check_output()


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class TestConcatOp(OpTest):
    def setUp(self):
        self.op_type = "concat"
        x0 = np.random.random((2, 3, 2, 5)).astype('float32')
        x1 = np.random.random((2, 3, 3, 5)).astype('float32')
        x2 = np.random.random((2, 3, 4, 5)).astype('float32')
        axis = 2
        self.inputs = {'X': [('x0', x0), ('x1', x1), ('x2', x2)]}
        self.attrs = {'axis': axis}
        self.outputs = {'Out': np.concatenate((x0, x1, x2), axis=axis)}

    def test_check_output(self):
        self.check_output()


if __name__ == '__main__':
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class TestCosSimOp(OpTest):
    def setUp(self):
        self.op_type = "cos_sim"
        self.inputs = {
            'X': np.random.random((10, 5)).astype("float32"),
            'Y': np.random.random((10, 5)).astype("float32")
        }
        expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
        expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
...@@ -23,38 +20,20 @@ class TestCosSimOp(unittest.TestCase):
            'Out': np.expand_dims(expect_out, 1)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))


if __name__ == "__main__":
    unittest.main()
...@@ -21,7 +21,7 @@ class TestCrossEntropy(OpTest):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Y')


if __name__ == "__main__":
......
import unittest
import numpy as np
from op_test import OpTest


class TestFillZerosLikeOp(OpTest):
    def setUp(self):
        self.op_type = "fill_zeros_like"
        self.inputs = {'Src': np.random.random((219, 232)).astype("float32")}
        self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])}

    def test_check_output(self):
        self.check_output()


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class TestGatherOp(OpTest):
    def setUp(self):
        self.op_type = "gather"
        xnp = np.random.random((10, 20)).astype("float32")
        self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")}
        self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == "__main__":
......
...@@ -14,11 +14,11 @@ class GaussianRandomTest(unittest.TestCase):
    def gaussian_random_test(self, place):
        scope = core.Scope()
        scope.new_var('Out').get_tensor()

        op = Operator(
            "gaussian_random",
            Out='Out',
            dims=[1000, 784],
            mean=.0,
            std=1.,
...@@ -27,10 +27,10 @@ class GaussianRandomTest(unittest.TestCase):
        op.infer_shape(scope)
        context = core.DeviceContext.create(place)
        op.run(scope, context)
        tensor = numpy.array(scope.find_var('Out').get_tensor())
        self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
        self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
import paddle.v2.framework.core as core
from op_test import get_numeric_gradient
from op_test import create_op


class GetNumericGradientTest(unittest.TestCase):
    def test_add_op(self):
        x = np.random.random((10, 1)).astype("float32")
        y = np.random.random((10, 1)).astype("float32")
        z = x + y
        scope = core.Scope()
        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
        arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out')
        self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)

    def test_softmax_op(self):
        def stable_softmax(x):
            """Compute the softmax of vector x in a numerically stable way."""
            shiftx = x - np.max(x)
            exps = np.exp(shiftx)
            return exps / np.sum(exps)

        def label_softmax_grad(Y, dY):
            dX = Y * 0.0
            for i in range(Y.shape[0]):
                d = np.dot(Y[i, :], dY[i, :])
                dX[i, :] = Y[i, :] * (dY[i, :] - d)
            return dX

        X = np.random.random((2, 2)).astype("float32")
        Y = np.apply_along_axis(stable_softmax, 1, X)
        dY = np.ones(Y.shape)
        dX = label_softmax_grad(Y, dY)
        scope = core.Scope()
        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())
        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
        np.testing.assert_almost_equal(arr, dX, decimal=1e-2)


if __name__ == "__main__":
......
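The reference gradient in `test_softmax_op` above is worth spelling out: `label_softmax_grad` applies the softmax Jacobian-vector product row by row. For $y = \mathrm{softmax}(x)$ one has $\partial y_j / \partial x_i = y_j(\delta_{ij} - y_i)$, so

$dx_i = \sum_j \frac{\partial y_j}{\partial x_i}\, dy_j = y_i\big(dy_i - \sum_j y_j\, dy_j\big),$

which is exactly `dX[i, :] = Y[i, :] * (dY[i, :] - d)` with `d = np.dot(Y[i, :], dY[i, :])` computed per row.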
import unittest
import numpy as np
from op_test import OpTest


class TestLookupTableOp(OpTest):
    def setUp(self):
        self.op_type = "lookup_table"
        table = np.random.random((17, 31)).astype("float32")
        ids = np.random.randint(0, 17, 4).astype("int32")
        self.inputs = {'W': table, 'Ids': ids}
        self.outputs = {'Out': table[ids]}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class TestMeanOp(OpTest):
    def setUp(self):
        self.op_type = "mean"
        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
        self.outputs = {'Out': np.mean(self.inputs["X"])}

    def test_check_output(self):
        self.check_output()

    def test_checkout_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class MinusOpTest(OpTest):
    def setUp(self):
        self.op_type = "minus"
        self.inputs = {
            'X': np.random.random((32, 84)).astype("float32"),
            'Y': np.random.random((32, 84)).astype("float32")
        }
        self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X', 'Y'], 'Out')


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class TestMulOp(OpTest):
    def setUp(self):
        self.op_type = "mul"
        self.inputs = {
            'X': np.random.random((32, 84)).astype("float32"),
            'Y': np.random.random((84, 100)).astype("float32")
        }
        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        # mul op will enlarge the relative error
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


class TestMulOp2(OpTest):
    def setUp(self):
        self.op_type = "mul"
        self.inputs = {
            'X': np.random.random((15, 4, 12, 10)).astype("float32"),
            'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32")
...@@ -32,72 +40,20 @@ class TestMulOp2(unittest.TestCase):
                self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


if __name__ == "__main__":
    unittest.main()
...@@ -35,5 +35,5 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}
        self.assertEqual(expected, "\n" + str(net))


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class TestRowwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "rowwise_add"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
            'b': np.random.uniform(0.1, 1, [10]).astype("float32")
        }
        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'b'], 'Out')

    def test_check_grad_ingore_b(self):
        self.check_grad(['X'], 'Out', no_grad_set=set('b'))

    def test_check_grad_ingore_x(self):
        self.check_grad(['b'], 'Out', no_grad_set=set('X'))


class TestRowwiseAddOp2(OpTest):
    def setUp(self):
        self.op_type = "rowwise_add"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
            'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32")
        }
        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'b'], 'Out')

    def test_check_grad_ignore_b(self):
        self.check_grad(['X'], 'Out', no_grad_set=set('b'))

    def test_check_grad_ignore_x(self):
        self.check_grad(['b'], 'Out', no_grad_set=set('X'))


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class IdentityTest(OpTest):
    def setUp(self):
        self.op_type = "identity"
        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
        self.outputs = {'Out': self.inputs['X']}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class ScaleTest(OpTest):
    def setUp(self):
        self.op_type = "scale"
        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
        self.attrs = {'scale': -2.3}
        self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class TestScatterOp(OpTest):
    def setUp(self):
        self.op_type = "scatter"
        ref_np = np.ones((3, 3)).astype("float32")
        index_np = np.array([1, 2]).astype("int32")
        updates_np = np.random.random((2, 3)).astype("float32")
        output_np = np.copy(ref_np)
        output_np[index_np] += updates_np
        self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['Updates', 'Ref'], 'Out', in_place=True)


if __name__ == "__main__":
......
import unittest
import numpy as np
from op_test import OpTest


class TestSGD(OpTest):
    def setUp(self):
        self.op_type = "sgd"
        w = np.random.random((102, 105)).astype("float32")
        g = np.random.random((102, 105)).astype("float32")
        lr = 0.1
        self.inputs = {'param': w, 'grad': g}
        self.attrs = {'learning_rate': lr}
        self.outputs = {'param_out': w - lr * g}

    def test_check_output(self):
        self.check_output()


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


def stable_softmax(x):
...@@ -13,26 +10,21 @@ def stable_softmax(x):
    return exps / np.sum(exps)


class TestSoftmaxOp(OpTest):
    def setUp(self):
        self.op_type = "softmax"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
        }
        self.outputs = {
            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Y')


if __name__ == "__main__":
......
import unittest
import numpy as np
from op_test import OpTest


class TestSquaredL2DistanceOp_f0(OpTest):
    def setUp(self):
        self.op_type = "squared_l2_distance"
        self.inputs = {
            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
            'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32")
        }
        sub_res = self.inputs['X'] - self.inputs['Y']
        output = sub_res * sub_res
...@@ -20,15 +17,19 @@ class TestSquaredL2DistanceOp_f0(unittest.TestCase):
            'Out': np.expand_dims(output.sum(1), 1)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X', 'Y'], 'Out')


class TestSquaredL2DistanceOp_f1(OpTest):
    def setUp(self):
        self.op_type = "squared_l2_distance"
        self.inputs = {
            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
            'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32")
        }
        sub_res = self.inputs['X'] - self.inputs['Y']
        output = sub_res * sub_res
...@@ -37,53 +38,34 @@ class TestSquaredL2DistanceOp_f1(unittest.TestCase):
            'Out': np.expand_dims(output.sum(1), 1)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X', 'Y'], 'Out')


class TestSquaredL2DistanceOp_f2(OpTest):
    def setUp(self):
        self.op_type = "squared_l2_distance"
        self.inputs = {
            'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"),
            'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32")
        }
        sub_res = self.inputs['X'] - self.inputs['Y']
        sub_res = sub_res.reshape((2, 3 * 4))
        output = sub_res * sub_res
        self.outputs = {
            'sub_result': sub_res,
            'Out': np.expand_dims(output.sum(1), 1)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X', 'Y'], 'Out')


if __name__ == "__main__":
    unittest.main()
...@@ -17,8 +17,8 @@ class TestSumOp(OpTest):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['x0'], 'Out')


if __name__ == "__main__":
    unittest.main()
import unittest
import numpy as np
from op_test import OpTest


class TestTopkOp(OpTest):
    def setUp(self):
        self.op_type = "top_k"
        k = 1
        input = np.random.random((32, 84)).astype("float32")
        output = np.ndarray((32, k))
...@@ -25,11 +22,9 @@ class TestTopkOp(unittest.TestCase):
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp3d(OpTest):
    def setUp(self):
        self.op_type = "top_k"
        k = 1
        input = np.random.random((32, 2, 84)).astype("float32")
        input_flat_2d = input.reshape(64, 84)
...@@ -48,5 +43,5 @@ class TestTopkOp3d(unittest.TestCase):
        self.outputs = {'Out': output, 'Indices': indices}


if __name__ == "__main__":
    unittest.main()
...@@ -14,11 +14,11 @@ class UniformRandomTest(unittest.TestCase):
    def uniform_random_test(self, place):
        scope = core.Scope()
        scope.new_var('X').get_tensor()

        op = Operator(
            "uniform_random",
            Out='X',
            dims=[1000, 784],
            min=-5.0,
            max=10.0,
...@@ -27,9 +27,9 @@ class UniformRandomTest(unittest.TestCase):
        op.infer_shape(scope)
        ctx = core.DeviceContext.create(place)
        op.run(scope, ctx)
        tensor = numpy.array(scope.find_var('X').get_tensor())
        self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)


if __name__ == "__main__":
    unittest.main()