Commit 21156b8d authored by lidanqing, committed by ceci3

MKLDNN: Add UT for conv_transpose_mkldnn op. (#16030)

* MKLDNN: Add UT for conv_transpose_mkldnn op.
test=develop

* MKLDNN: Add fuse_bias check UT for conv_transpose_mkldnn op.
test=develop
Parent b1a49e87
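Note (not part of the patch): the reference output the new UT checks against is simply the naive transposed-convolution result with one bias value added per output channel and, optionally, a ReLU applied, which is what the MKL-DNN kernel fuses. A minimal NumPy sketch of that reference computation; the function name and the concrete shapes are illustrative only (the default case in the UT, a 5x5 input with a 3x3 kernel, stride 1 and no padding, yields a 7x7 output):

import numpy as np

def fused_reference(conv_out, bias, fuse_relu=True):
    # conv_out: NCHW output of the naive transposed convolution.
    # Add one bias value per output channel, then optionally clamp at zero.
    out = conv_out.copy()
    for c in range(out.shape[1]):
        out[:, c, :, :] += bias[c]
    if fuse_relu:
        out = np.maximum(out, 0)
    return out

# Example with the UT's default sizes: batch 2, 6 output channels, 7x7 spatial output.
ref = fused_reference(
    np.random.random((2, 6, 7, 7)).astype("float32"),
    np.random.random(6).astype("float32"))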
@@ -127,6 +127,12 @@ void Conv2DTransposeOpMaker::Make() {
       "output feature channels,"
       "H is the height of the filter, and W is the width of the filter. "
       "We enforce groups number == 1 in the convolution transpose scenario.");
+  AddInput("Bias",
+           "(Tensor) Bias to be added to each output of filter application."
+           "The format of output tensor is X (one-dimensional) of size equal"
+           "to the number of output channels. Only used with MKL-DNN.")
+      .AsDispensable();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution transpose operator. "
             "The format of output tensor is also NCHW.");
...
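Note (reviewer annotation, not part of the patch): because `Bias` is registered with `.AsDispensable()`, existing conv2d_transpose tests keep working without it; the MKL-DNN UT below supplies it only when `fuse_bias` is set. A rough sketch of the operator-level inputs and attributes the OpTest harness ends up assembling for the fused case; the keys mirror the op definition and the test's fields, the values are illustrative, and the exact attribute set is an assumption:

import numpy as np

# Hypothetical description of a fused conv2d_transpose run, for illustration only.
inputs = {
    'Input': np.random.random((2, 3, 5, 5)).astype("float32"),   # NCHW activations
    'Filter': np.random.random((3, 6, 3, 3)).astype("float32"),  # [C_in, C_out, kH, kW]
    'Bias': np.random.random(6).astype("float32"),               # one value per output channel
}
attrs = {
    'strides': [1, 1],
    'paddings': [1, 1],
    'dilations': [1, 1],
    'groups': 1,
    'use_mkldnn': True,
    'fuse_bias': True,
    'fuse_relu': True,
}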
@@ -15,36 +15,22 @@
 from __future__ import print_function
 
 import unittest
+import numpy as np
 
-from paddle.fluid.tests.unittests.test_conv2d_transpose_op import TestConv2dTransposeOp, TestWithPad, TestWithStride
+import paddle.fluid.core as core
+from paddle.fluid.tests.unittests.op_test import OpTest
+from paddle.fluid.tests.unittests.test_conv2d_transpose_op import conv2dtranspose_forward_naive, TestConv2dTransposeOp
 
 
-class TestMKLDNN(TestConv2dTransposeOp):
-    def init_op_type(self):
-        self.is_test = True
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
-        self.op_type = "conv2d_transpose"
-        self._cpu_only = True
-
-    def test_check_grad(self):
-        return
-
-    def test_check_grad_no_input(self):
-        return
-
-    def test_check_grad_no_filter(self):
-        return
-
-
-class TestMKLDNNWithPad(TestWithPad):
-    def init_op_type(self):
-        self.is_test = True
-        self.use_mkldnn = True
-        self.data_format = "NCHW"
-        self.op_type = "conv2d_transpose"
-        self._cpu_only = True
-
+def conv2d_bias_naive(out, bias):
+    _, out_c, _, _ = out.shape
+
+    for l in range(out_c):
+        out[:, l, :, :] = out[:, l, :, :] + bias[l]
+    return out
+
+
+class TestConv2dTransposeMKLDNNOp(TestConv2dTransposeOp):
     def test_check_grad(self):
         return
@@ -54,24 +40,64 @@ class TestMKLDNNWithPad(TestWithPad):
     def test_check_grad_no_filter(self):
         return
 
-
-class TestMKLDNNWithStride(TestWithStride):
     def init_op_type(self):
-        self.is_test = True
-        self.use_mkldnn = True
         self.data_format = "NCHW"
         self.op_type = "conv2d_transpose"
         self._cpu_only = True
 
-    def test_check_grad(self):
-        return
-
-    def test_check_grad_no_input(self):
-        return
-
-    def test_check_grad_no_filter(self):
-        return
-
-
-if __name__ == '__main__':
-    unittest.main()
+    def init_test_case(self):
+        self.use_mkldnn = True
+        self.is_test = True
+        self.pad = [0, 0]
+        self.fuse_bias = False
+        self.bias_size = None
+        self.fuse_relu = False
+        self.stride = [1, 1]
+        self.dilations = [1, 1]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3]
+        self.groups = 1
+
+    def setUp(self):
+        TestConv2dTransposeOp.setUp(self)
+
+        output = self.outputs['Output']
+
+        if self.fuse_bias and self.bias_size is not None:
+            bias = np.random.random(self.bias_size).astype(self.dtype)
+            output = conv2d_bias_naive(output, bias)
+
+            output = output.astype(self.dtype)
+            self.attrs['fuse_bias'] = self.fuse_bias
+            self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias)
+
+        if self.fuse_relu:
+            output = np.maximum(output, 0).astype(self.dtype)
+
+        self.attrs['fuse_bias'] = self.fuse_bias
+        self.attrs['fuse_relu'] = self.fuse_relu
+
+        self.outputs['Output'] = output
+
+
+class TestMKLDNNFuseBias(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.fuse_bias = True
+        self.bias_size = [6]
+
+
+class TestMKLDNNWithPad(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.input_size = [2, 3, 10, 10]
+
+
+class TestMKLDNNWithStride(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.stride = [2, 2]
+        self.input_size = [2, 3, 6, 6]  # NCHW
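Note (not part of the patch): one way to exercise just the new fused-bias case locally is to load it through unittest. The module name below is an assumption, since this diff view does not show the file path:

import unittest

# Assumed module name; adjust to wherever the test file actually lives.
from test_conv2d_transpose_mkldnn_op import TestMKLDNNFuseBias

suite = unittest.TestLoader().loadTestsFromTestCase(TestMKLDNNFuseBias)
unittest.TextTestRunner(verbosity=2).run(suite)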