Commit 5e36757c authored by Kexin Zhao

fix test

Parent 151cfff9
@@ -187,74 +187,99 @@ def set_output_grad(scope, outputs, place, feed_dict=None):
 class TestBatchNormOpInference(OpTest):
-    def setUp(self):
-        self.op_type = "conv2d"
-        self.is_test = True
-        self.dtype = np.float32
-        self.data_layout = "NCHW"
-        init_dtype()
-        init_data_layout()
-        init_test_case()
-
-        epsilon = 0.00001
-        shape = self.shape
-        if len(shape) == 2:
-            x_shape = shape
-            c = x_shape[1]
-        else:
-            n, h, w, c = shape[0], shape[1], shape[2], shape[3]
-            if self.data_layout == "NHWC":
-                x_shape = [n, h, w, c]
-            elif self.data_layout == "NCHW":
-                x_shape = [n, c, h, w]
-            else:
-                raise ValueError("Unknown data layout.")
-        scale_shape = [c]
-
-        x_val = np.random.random_sample(x_shape).astype(self.dtype)
-        scale_val = np.random.random_sample(scale_shape).astype(self.dtype)
-        bias_val = np.random.random_sample(scale_shape).astype(self.dtype)
-        mean = np.zeros(scale_shape).astype(self.dtype)
-        variance = np.ones(scale_shape).astype(self.dtype)
-        saved_mean = np.zeros(scale_shape).astype(self.dtype)
-        saved_variance = np.ones(scale_shape).astype(self.dtype)
-
-        y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance,
-                                   epsilon, self.data_layout).astype(self.dtype)
-
-        self.inputs = {
-            'X': OpTest.np_dtype_to_fluid_dtype(x_val),
-            'Scale': OpTest.np_dtype_to_fluid_dtype(scale_val),
-            'Bias': OpTest.np_dtype_to_fluid_dtype(bias_val),
-            'Mean': OpTest.np_dtype_to_fluid_dtype(mean),
-            'Variance': OpTest.np_dtype_to_fluid_dtype(variance)
-        }
-        self.attrs = {
-            'is_test': self.is_test,
-            'epsilon': epsilon,
-            'data_layout': self.data_layout
-        }
-        self.outputs = {
-            'Y': y_out,
-            'MeanOut': mean,
-            'VarianceOut': variance,
-            'SavedMean': saved_mean,
-            'SavedVariance': saved_variance
-        }
-
-    def test_check_output(self):
-        self.check_output()
-
-    def init_dtype(self):
-        pass
-
-    def init_data_layout(self):
-        pass
-
-    def init_test_case(self):
-        self.shape = [2, 3, 4, 5]
+    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
+        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+
+    def test_inference(self):
+        def test_with_place(place, data_layout, dtype, shape):
+            epsilon = 0.00001
+            if len(shape) == 2:
+                x_shape = shape
+                c = x_shape[1]
+            else:
+                n, h, w, c = shape[0], shape[1], shape[2], shape[3]
+                if data_layout == "NHWC":
+                    x_shape = [n, h, w, c]
+                elif data_layout == "NCHW":
+                    x_shape = [n, c, h, w]
+                else:
+                    raise ValueError("Unknown data layout.")
+            scale_shape = [c]
+
+            x_val = np.random.random_sample(x_shape).astype(dtype)
+            scale_val = np.random.random_sample(scale_shape).astype(dtype)
+            bias_val = np.random.random_sample(scale_shape).astype(dtype)
+            mean = np.zeros(scale_shape).astype(dtype)
+            variance = np.ones(scale_shape).astype(dtype)
+
+            y_out = _reference_testing(x_val, scale_val, bias_val, mean,
+                                       variance, epsilon,
+                                       data_layout).astype(dtype)
+
+            scope = core.Scope()
+
+            # create input
+            x_tensor = create_or_get_tensor(
+                scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place)
+            scale_tensor = create_or_get_tensor(
+                scope, "scale_val",
+                OpTest.np_dtype_to_fluid_dtype(scale_val), place)
+            bias_tensor = create_or_get_tensor(
+                scope, "bias_val",
+                OpTest.np_dtype_to_fluid_dtype(bias_val), place)
+            mean_tensor = create_or_get_tensor(
+                scope, "mean", OpTest.np_dtype_to_fluid_dtype(mean), place)
+            variance_tensor = create_or_get_tensor(
+                scope, "variance",
+                OpTest.np_dtype_to_fluid_dtype(variance), place)
+
+            # create output
+            y_tensor = create_or_get_tensor(scope, "y_out", None, place)
+            saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
+                                                     place)
+            saved_variance_tensor = create_or_get_tensor(
+                scope, "saved_variance", None, place)
+            mean_out_tensor = mean_tensor
+            variance_out_tensor = variance_tensor
+
+            batch_norm_op = Operator(
+                "batch_norm",
+                # inputs
+                X="x_val",
+                Scale="scale_val",
+                Bias="bias_val",
+                Mean="mean",
+                Variance="variance",
+                # outputs
+                Y="y_out",
+                MeanOut="mean",
+                VarianceOut="variance",
+                SavedMean="saved_mean",
+                SavedVariance="saved_variance",
+                # attrs
+                is_test=True,
+                data_layout=data_layout,
+                epsilon=epsilon)
+
+            batch_norm_op.run(scope, place)
+
+            # check inference result
+            self.__assert_close(
+                y_tensor, y_out, "inference output are different at " +
+                str(place) + ", " + data_layout + ", " + str(np.dtype(dtype)))
+
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
+            place = core.CUDAPlace(0)
+            if self.dtype != np.float16 or core.is_float16_supported(place):
+                places.append(place)
+
+        for place in places:
+            for data_format in ["NCHW", "NHWC"]:
+                for dtype in [np.float32, np.float16]:
+                    test_with_place(place, data_format, dtype, [2, 3, 4, 5])
+                    test_with_place(place, data_format, dtype, [2, 3])
 
 
 class TestBatchNormOpTraining(OpTest):
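The new `test_with_place` drives the real `batch_norm` operator through `Operator(...).run(scope, place)` and compares the output tensor against `y_out`, which comes from the `_reference_testing` helper defined earlier in this file (outside this hunk). For context, batch-norm inference normalizes with the running statistics and applies a per-channel affine transform, y = scale * (x - mean) / sqrt(variance + epsilon) + bias. Below is a minimal NumPy sketch of that computation, assuming standard batch-norm semantics; the function name and layout handling are illustrative, not the file's verbatim helper:

```python
import numpy as np

def batch_norm_inference(x, scale, bias, mean, variance, epsilon, data_layout):
    # Inference-mode batch norm: normalize with the running mean/variance,
    # then apply the per-channel affine transform.
    if data_layout == "NCHW" and x.ndim == 4:
        # Reshape the 1-D per-channel parameters so they broadcast over N, H, W.
        shape = (1, -1, 1, 1)
        scale, bias = scale.reshape(shape), bias.reshape(shape)
        mean, variance = mean.reshape(shape), variance.reshape(shape)
    # For NHWC (and 2-D NC input) the channel is the last axis, so the
    # 1-D parameters already broadcast correctly.
    return scale * (x - mean) / np.sqrt(variance + epsilon) + bias

# Shapes mirror the test: [2, 3, 4, 5] in NCHW, so c = 3.
x = np.random.random_sample([2, 3, 4, 5]).astype(np.float32)
scale = np.random.random_sample([3]).astype(np.float32)
bias = np.random.random_sample([3]).astype(np.float32)
mean = np.zeros([3], dtype=np.float32)
variance = np.ones([3], dtype=np.float32)
y = batch_norm_inference(x, scale, bias, mean, variance, 1e-5, "NCHW")
assert y.shape == x.shape
```

Since the test initializes the running mean to zeros and the running variance to ones, this reduces to scale * x / sqrt(1 + epsilon) + bias, which makes the operator's output easy to cross-check.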
@@ -288,8 +313,7 @@ class TestBatchNormOpTraining(OpTest):
         # transfer (N, C, H, W) back to (N, H, W, C)
         y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1))
-        self.__assert_close(y_out, y_out2_trans,
-                            "inference outputs of two formats have differences")
+        self.__assert_close(y_out, y_out2_trans, "inference output")
         print 'python: NHWC, NCHW, inference checking passed'
 
     def test_python_training(self):
......
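The second hunk only shortens the assertion message in the NHWC-vs-NCHW consistency check; the comparison itself hinges on `np.transpose(y_out2, (0, 2, 3, 1))` mapping an (N, C, H, W) result back to (N, H, W, C). A small standalone illustration of that axis mapping (values are arbitrary):

```python
import numpy as np

# An (N, C, H, W) array with distinct values per element.
y_nchw = np.arange(24, dtype=np.float32).reshape(2, 3, 2, 2)

# Move the channel axis to the end: (N, C, H, W) -> (N, H, W, C).
y_nhwc = np.transpose(y_nchw, (0, 2, 3, 1))
assert y_nhwc.shape == (2, 2, 2, 3)

# Element (n, h, w, c) of the transposed array is element (n, c, h, w)
# of the original, which is exactly what the test's comparison relies on.
assert y_nhwc[1, 0, 1, 2] == y_nchw[1, 2, 0, 1]
```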