Commit c548e370 authored by lidanqing, committed by Tao Luo

UT coverage for gaussian_mkldnn_op and batch_norm_mkldnn_op (#19011)

* integration problem
test=develop

* add batch_norm_mkldnn_op backward-reuse test and gaussian seed=0 test
test=develop
Parent 6ac32d09
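Both new tests follow the same pattern: compute reference outputs with NumPy, build a fluid.Program containing the op (and its generated backward op), then execute it at least twice on CPUPlace, comparing every fetched tensor against the reference on each pass. The first run creates and caches the MKL-DNN primitives; the second run must reuse them and still match. A minimal sketch of that pattern, assuming a program, feed dict, and reference dict have already been built (none of the names below are part of this commit):

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.core as core

    def run_twice_and_check(program, feed, fetch_list, reference):
        # reference maps each fetched variable name to its expected numpy array
        exe = fluid.Executor(core.CPUPlace())
        for i in range(2):  # pass 2 reuses the primitives cached by pass 1
            outs = exe.run(program, feed=feed, fetch_list=fetch_list)
            for idx, name in enumerate(fetch_list):
                assert np.allclose(outs[idx], reference[name], atol=1e-4), name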
@@ -19,13 +19,14 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 
 
+def __assert_close(test_case, tensor, np_array, msg, atol=1e-4):
+    test_case.assertTrue(
+        np.allclose(
+            np.array(tensor), np_array, atol=atol), msg)
+
+
 def check_if_mkldnn_primitives_exist_in_bwd(test_case, op_type, x, out,
                                             out_grad, x_grad):
-    def __assert_close(tensor, np_array, msg, atol=1e-4):
-        test_case.assertTrue(
-            np.allclose(
-                np.array(tensor), np_array, atol=atol), msg)
-
     place = core.CPUPlace()
     var_dict = {'x': x, 'out': out, 'out@GRAD': out_grad, 'x@GRAD': x_grad}
@@ -69,7 +70,81 @@ def check_if_mkldnn_primitives_exist_in_bwd(test_case, op_type, x, out,
                   for name in ['x', 'out@GRAD']},
             fetch_list=['x@GRAD', 'out'])
-        __assert_close(x_grad, out[0], 'x@GRAD')
+        __assert_close(test_case, x_grad, out[0], 'x@GRAD')
+
+
+def check_if_mkldnn_batchnorm_primitives_exist_in_bwd(
+        test_case, var_dict, place, shape, data_layout):
+    var_names = [
+        'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean',
+        'saved_variance'
+    ]
+    ground_truth = {name: var_dict[name] for name in var_names}
+    program = fluid.Program()
+    with fluid.program_guard(program):
+        block = program.global_block()
+        for name in ground_truth:
+            block.create_var(
+                name=name, dtype='float32', shape=ground_truth[name].shape)
+        bn_op = block.append_op(
+            type="batch_norm",
+            inputs={
+                "X": block.var('x'),
+                "Scale": block.var('scale'),
+                "Bias": block.var('bias'),
+                "Mean": block.var('mean'),
+                "Variance": block.var('variance')
+            },
+            outputs={
+                "Y": block.var('y'),
+                "MeanOut": block.var('mean'),  # share memory
+                "VarianceOut": block.var('variance'),  # share memory
+                "SavedMean": block.var('saved_mean'),
+                "SavedVariance": block.var('saved_variance')
+            },
+            attrs={
+                "momentum": test_case.momentum,
+                "epsilon": test_case.epsilon,
+                "is_test": False,
+                "data_layout": data_layout,
+                "use_mkldnn": test_case.use_mkldnn,
+                "fuse_with_relu": test_case.fuse_with_relu,
+                "use_global_stats": test_case.use_global_stats
+            })
+        block.create_var(
+            name='y@GRAD', dtype='float32', shape=var_dict['y'].shape)
+
+        # generate backward op_desc
+        grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
+            bn_op.desc, test_case.no_grad_set, [])
+        grad_op_desc = grad_op_desc_list[0]
+        new_op_desc = block.desc.append_op()
+        new_op_desc.copy_from(grad_op_desc)
+        for var_name in grad_op_desc.output_arg_names():
+            block.desc.var(var_name.encode("ascii"))
+        grad_op_desc.infer_var_type(block.desc)
+        grad_op_desc.infer_shape(block.desc)
+        for arg in grad_op_desc.output_arg_names():
+            grad_var = block.desc.find_var(arg.encode("ascii"))
+            grad_var.set_dtype(core.VarDesc.VarType.FP32)
+
+        exe = fluid.Executor(place)
+
+        # Run at least 2 iterations: the second pass must reuse the
+        # MKL-DNN primitives created and cached during the first pass
+        for i in range(2):
+            out = exe.run(
+                program,
+                feed={
+                    name: var_dict[name]
+                    for name in
+                    ['x', 'scale', 'bias', 'mean', 'variance', 'y@GRAD']
+                },
+                fetch_list=test_case.fetch_list)
+            for id, name in enumerate(test_case.fetch_list):
+                __assert_close(test_case, var_dict[name], out[id], name)
+        print("MKLDNN op test forward passed: ", str(place), data_layout)
 
 
 def format_reorder(out, size):
...
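Note: the helper above appends the backward op manually via core.get_grad_op_desc instead of going through the higher-level backward pass, so the test controls exactly which grad op runs and which outputs it fetches. Running the exe.run loop twice is what makes this a reuse test: to my understanding, PaddlePaddle caches MKL-DNN primitives in the device context keyed by input shapes and attributes, so the second iteration hits the cache populated by the first, and identical results across both iterations confirm the reused primitives are correct.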
@@ -22,6 +22,7 @@ import paddle.fluid as fluid
 from paddle.fluid.tests.unittests.op_test import OpTest
 from paddle.fluid.framework import grad_var_name
 from paddle.fluid.tests.unittests.test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad
+from mkldnn_op_test import check_if_mkldnn_batchnorm_primitives_exist_in_bwd
 
 
 class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining):
@@ -43,6 +44,36 @@ class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining):
         return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad
+
+
+class TestMKLDNNBatchNormOpExistedPrimitives(TestMKLDNNBatchNormOpTraining):
+    def init_test_case(self):
+        TestMKLDNNBatchNormOpTraining.init_test_case(self)
+        self.fetch_list = ['y', 'x@GRAD']
+
+    def test_forward_backward(self):
+        place = core.CPUPlace()
+        shape = [2, 3, 4, 5]
+        scale_shape = [3]
+        data_layout = "NCHW"
+        # initialize the ground-truth
+        np.random.seed(123)
+        x = np.random.random_sample(shape).astype(np.float32)
+        scale = np.random.random_sample(scale_shape).astype(np.float32)
+        bias = np.random.random_sample(scale_shape).astype(np.float32)
+        mean, variance = self.set_mean_variance(scale_shape, x, data_layout)
+        y_grad = np.random.random_sample(shape).astype(np.float32)
+
+        y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward(
+            x, y_grad, scale, bias, mean, variance, self.epsilon,
+            self.momentum, shape, data_layout)
+        var_dict = locals()
+        var_dict['y@GRAD'] = y_grad
+        var_dict['x@GRAD'] = x_grad
+        var_dict['scale@GRAD'] = scale_grad
+        var_dict['bias@GRAD'] = bias_grad
+
+        check_if_mkldnn_batchnorm_primitives_exist_in_bwd(self, var_dict, place,
+                                                          shape, data_layout)
 
 
 class TestMKLDNNBatchNormOpInference(TestBatchNormOpInference):
     def init_kernel_type(self):
         self.use_mkldnn = True
@@ -50,7 +81,6 @@ class TestMKLDNNBatchNormOpInference(TestBatchNormOpInference):
     def test_check_output(self):
         place = core.CPUPlace()
         data_format = "NCHW"
-
         self.check_with_place(place, data_format, self.dtype, [2, 3, 4, 5])
@@ -62,7 +92,6 @@ class TestMKLDNNBatchNormOpWithReluInference(TestBatchNormOpInference):
     def test_check_output(self):
         place = core.CPUPlace()
         data_format = "NCHW"
-
        self.check_with_place(place, data_format, self.dtype, [2, 3, 4, 5])
...
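A side note on the test above: var_dict = locals() captures every reference array defined in test_forward_backward, keyed by its variable name, which is exactly how the helper looks up ground truth ('x', 'scale', 'y', and so on). The gradient entries are added explicitly afterwards because '@' cannot appear in a Python identifier. A toy illustration of the trick (not part of the commit):

    def f():
        x = 1
        d = locals()      # {'x': 1} -- snapshots local variables by name
        d['x@GRAD'] = 2   # '@' is illegal in identifiers, so add it by key
        return d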
@@ -19,7 +19,22 @@ import unittest
 from paddle.fluid.tests.unittests.test_gaussian_random_op import TestGaussianRandomOp
 
 
-class TestMKLDNN(TestGaussianRandomOp):
+class TestMKLDNNGaussianRandomOpSeed10(TestGaussianRandomOp):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+
+
+class TestMKLDNNGaussianRandomOpSeed0(TestGaussianRandomOp):
+    def setUp(self):
+        TestGaussianRandomOp.setUp(self)
+        self.attrs = {
+            "shape": [1000, 784],
+            "mean": .0,
+            "std": 1.,
+            "seed": 0,
+            "use_mkldnn": self.use_mkldnn
+        }
+
     def init_kernel_type(self):
         self.use_mkldnn = True
...
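Why a separate seed=0 case: with a fixed nonzero seed the output is deterministic, but seed=0 tells the gaussian_random kernel to pick its own seed (in the CPU kernel this appears to come from a random device), so exact values cannot be compared. The inherited verify_output from TestGaussianRandomOp only checks the sample statistics of the 1000x784 output, which hold for any seed. A simplified stand-in for that statistical check, given a numpy sample (the helper name is hypothetical, and the real base test compares histograms rather than raw moments):

    import numpy as np

    def verify_gaussian_sample(sample, mean=0.0, std=1.0, atol=0.1):
        # distribution-level assertions: valid whatever seed produced the sample
        assert np.allclose(sample.mean(), mean, atol=atol)
        assert np.allclose(sample.std(), std, atol=atol)

    verify_gaussian_sample(np.random.normal(0., 1., (1000, 784)))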