Unverified commit 7c304580, authored by HongyuJia, committed by GitHub

[Opt depthwise_conv2d] Simplify depthwise_conv2d use_cudnn attribute (#48010)

* simplify depthwise_conv2d phi kernel selection

* fix depthwise_conv2d
Parent 8e6315e4
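The change is confined to kernel selection: callers keep using the public convolution APIs, and Paddle routes a grouped convolution whose group count equals the input channel count to the depthwise_conv2d kernel. After this commit the cuDNN choice is fixed by the use_gpudnn entry in the op YAML rather than threaded through a per-call use_cudnn argument. A minimal sketch of such a call (shapes and variable names are illustrative, not taken from the patch):

import paddle
import paddle.nn.functional as F

# 8 input channels with groups=8 -> one filter per channel, i.e. a depthwise conv.
x = paddle.randn([1, 8, 32, 32])
w = paddle.randn([8, 1, 3, 3])

# Dispatches to the depthwise_conv2d kernel; whether cuDNN is used is now
# decided by the use_gpudnn setting in the op YAML, not by this call site.
y = F.conv2d(x, w, stride=1, padding=1, groups=8)
print(y.shape)  # [1, 8, 32, 32]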
@@ -391,7 +391,7 @@
optional : mask
- backward_op : depthwise_conv2d_double_grad
forward : depthwise_conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format, bool use_gpudnn) -> Tensor(grad_input), Tensor(grad_filter)
forward : depthwise_conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_input), Tensor(grad_filter)
args : (Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
infer_meta :
@@ -402,8 +402,8 @@
optional : grad_input_grad, grad_filter_grad
- backward_op : depthwise_conv2d_grad
forward : depthwise_conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format, bool use_gpudnn) -> Tensor(out)
args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format, bool use_gpudnn)
forward : depthwise_conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(input_grad), Tensor(filter_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
@@ -411,7 +411,7 @@
kernel :
func : depthwise_conv2d_grad
param : [input, filter, out_grad, strides, paddings, padding_algorithm, groups, dilations, data_format]
use_gpudnn : use_gpudnn
use_gpudnn : True
backward : depthwise_conv2d_double_grad
- backward_op : depthwise_conv2d_transpose_grad
@@ -541,7 +541,7 @@
backward : deformable_conv_grad
- op : depthwise_conv2d
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format, bool use_gpudnn)
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : DepthwiseConvInferMeta
@@ -549,7 +549,7 @@
kernel :
func : depthwise_conv2d
param : [x, filter, strides, paddings, padding_algorithm, groups, dilations, data_format]
use_gpudnn : use_gpudnn
use_gpudnn : true
backward : depthwise_conv2d_grad
- op : depthwise_conv2d_transpose
@@ -172,7 +172,6 @@ def _conv_nd(
groups,
dilation,
data_format,
use_cudnn,
)
if bias is not None:
channel_dim = (
@@ -484,7 +483,7 @@ def conv1d(
conv2d_data_format,
)
else:
out = getattr(_C_ops, l_type)(
out = _C_ops.depthwise_conv2d(
x,
weight,
stride,
@@ -497,7 +496,6 @@ def conv1d(
-1,
False,
False,
use_cudnn,
)
if bias is not None:
out = nn.elementwise_add(out, bias, axis=channel_dim)
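This patch changes how the kernel is selected, not what the op computes. As an illustration only (not part of the commit), a depthwise convolution with groups equal to the channel count should match running an independent single-channel convolution per channel, regardless of which backend kernel is dispatched:

import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 4, 16, 16])
w = paddle.randn([4, 1, 3, 3])

# Depthwise path: groups == number of input channels.
depthwise = F.conv2d(x, w, padding=1, groups=4)

# Equivalent per-channel convolutions, concatenated back along the channel axis.
per_channel = paddle.concat(
    [F.conv2d(x[:, i:i + 1], w[i:i + 1], padding=1) for i in range(4)],
    axis=1,
)

print(paddle.allclose(depthwise, per_channel).item())  # expected: True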