Unverified · Commit f551d9fe authored by Chen Weihang, committed by GitHub

[Phi] Fix depthwise conv yaml error (#43379)

* fix depthwise conv yaml error

* fix depthwise conv double grad error
Parent: b4a93884
@@ -663,13 +663,13 @@ void ConvCudnnGradGradKernel(
 }
 
 template <typename T, typename Context>
-void DepthwiseConvCudnnGradGradKernel(
+void DepthwiseConvDoubleGradGPUDNNKernel(
     const Context& ctx,
-    const paddle::optional<DenseTensor>& input_grad_grad,
-    const paddle::optional<DenseTensor>& filter_grad_grad,
-    const DenseTensor& out_grad,
     const DenseTensor& input,
     const DenseTensor& filter,
+    const DenseTensor& out_grad,
+    const paddle::optional<DenseTensor>& input_grad_grad,
+    const paddle::optional<DenseTensor>& filter_grad_grad,
     const std::vector<int>& strides,
     const std::vector<int>& paddings_t,
     const std::string& padding_algorithm,
@@ -680,9 +680,9 @@ void DepthwiseConvCudnnGradGradKernel(
     int workspace_size_MB,
     bool exhaustive_search_t,
    bool fuse_relu,
-    DenseTensor* out_grad_grad,
     DenseTensor* input_grad,
-    DenseTensor* filter_grad) {
+    DenseTensor* filter_grad,
+    DenseTensor* out_grad_grad) {
   ConvCudnnGradGradKernel<T>(ctx,
                              input,
                              filter,
@@ -763,7 +763,7 @@ PD_REGISTER_KERNEL(conv3d_grad_grad,
 PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::DepthwiseConvCudnnGradGradKernel,
+                   phi::DepthwiseConvDoubleGradGPUDNNKernel,
                    float,
                    phi::dtype::float16) {}
 #else
@@ -789,7 +789,7 @@ PD_REGISTER_KERNEL(conv3d_grad_grad,
 PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::DepthwiseConvCudnnGradGradKernel,
+                   phi::DepthwiseConvDoubleGradGPUDNNKernel,
                    float,
                    double,
                    phi::dtype::float16,
@@ -816,7 +816,7 @@ PD_REGISTER_KERNEL(conv3d_grad_grad,
 PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
                    GPU,
                    ALL_LAYOUT,
-                   phi::DepthwiseConvCudnnGradGradKernel,
+                   phi::DepthwiseConvDoubleGradGPUDNNKernel,
                    float,
                    double,
                    phi::dtype::float16) {}
......
@@ -53,7 +53,7 @@ KernelSignature DepthwiseConv2dGradOpArgumentMapping(
 KernelSignature DepthwiseConv2dDoubleGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature("depthwise_conv2d_grad_grad",
-                         {"DDInput", "DDFilter", "DOutput", "Input", "Filter"},
+                         {"Input", "Filter", "DOutput", "DDInput", "DDFilter"},
                          {"strides",
                           "paddings",
                           "padding_algorithm",
@@ -64,7 +64,7 @@ KernelSignature DepthwiseConv2dDoubleGradOpArgumentMapping(
                           "workspace_size_MB",
                           "exhaustive_search",
                           "fuse_relu_before_depthwise_conv"},
-                         {"DDOutput", "DInput", "DFilter"});
+                         {"DInput", "DFilter", "DDOutput"});
 }
 
 }  // namespace phi
......
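The two hunks above work together: the kernel's tensor parameters and the names listed in the KernelSignature are matched purely by position, so both must use the same order. Below is a toy Python sketch of that binding; the function and variable names are illustrative only, not Paddle internals.

```python
# Toy model (not Paddle code) of positional argument mapping: the
# KernelSignature order must match the kernel's parameter order exactly,
# or tensors get bound to the wrong parameters.
def double_grad_kernel(input, filter, d_output, dd_input, dd_filter):
    # Declaration order of the fixed kernel: Input, Filter, DOutput,
    # DDInput, DDFilter.
    return {"DInput": input, "DFilter": filter, "DDOutput": d_output}

signature = ["Input", "Filter", "DOutput", "DDInput", "DDFilter"]
op_tensors = {"Input": "x", "Filter": "w", "DOutput": "dy",
              "DDInput": "ddx", "DDFilter": "ddw"}

# Positional binding, as the argument-mapping layer does:
double_grad_kernel(*(op_tensors[name] for name in signature))

# With the old order ["DDInput", "DDFilter", "DOutput", "Input", "Filter"],
# `input` would silently receive ddx -- the mismatch these hunks fix.
```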
@@ -143,7 +143,7 @@ def _conv_nd(x,
     if in_dygraph_mode() and op_type == "depthwise_conv2d":
         pre_bias = _C_ops.final_state_depthwise_conv2d(
             x, weight, stride, padding, padding_algorithm, groups, dilation,
-            data_format, False, -1, False, False)
+            data_format, False, -1, False, False, use_cudnn)
         if bias is not None:
             channel_dim = channel_dim + len(
                 x.shape) if channel_dim < 0 else channel_dim
......
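For reference, a minimal sketch of a user-facing call that reaches the patched branch: when `groups` equals the input channel count, `F.conv2d` dispatches to the `depthwise_conv2d` op in dygraph mode. Shapes here are illustrative.

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 8, 32, 32])   # NCHW input, 8 channels
w = paddle.randn([8, 1, 3, 3])     # one 3x3 filter per channel
# groups == in_channels routes to the depthwise_conv2d op in dygraph mode
y = F.conv2d(x, w, stride=1, padding=1, groups=8)
print(y.shape)                     # [2, 8, 32, 32]
```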
@@ -499,15 +499,16 @@
   backward : deformable_conv_grad
 
 - api : depthwise_conv2d
-  args : (Tensor x, Tensor filter, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu)
+  args : (Tensor x, Tensor filter, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn)
   output : Tensor(out)
-  invoke : conv2d_impl(x, filter, strides, paddings, padding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search)
+  infer_meta :
+    func : ConvInferMeta
+    param : [x, filter, strides, paddings, padding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search]
+  kernel :
+    func : depthwise_conv2d
+    param : [x, filter, strides, paddings, padding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, fuse_relu]
+    use_gpudnn : use_gpudnn
   backward : depthwise_conv2d_grad
-  # infer_meta :
-  #   func : ConvTransposeInferMeta
-  #   prams: [x, filter, strides, paddings, padding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search]
-  # kernel :
-  #   func : depthwise_conv2d
 
 - api : depthwise_conv2d_transpose
   args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
......
@@ -495,22 +495,27 @@
   optional : mask
 
 - backward_api : depthwise_conv2d_grad
-  forward : depthwise_conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu) -> Tensor(out)
-  args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu)
+  forward : depthwise_conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn) -> Tensor(out)
+  args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn)
   output : Tensor(input_grad), Tensor(filter_grad)
-  invoke : conv2d_grad_impl(input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, input_grad, filter_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [input, filter]
+  kernel :
+    func : depthwise_conv2d_grad
+    param : [input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, fuse_relu]
+    use_gpudnn : use_gpudnn
   backward : depthwise_conv2d_grad_grad
 
 - backward_api : depthwise_conv2d_grad_grad
-  forward : depthwise_conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu) -> Tensor(grad_input), Tensor(grad_filter)
-  args : (Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
+  forward : depthwise_conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn) -> Tensor(grad_input), Tensor(grad_filter)
+  args : (Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu)
   output : Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
   infer_meta :
     func : GeneralTernaryGradInferMeta
     param: [input, filter, grad_out]
   kernel :
-    func : conv2d_grad_grad
+    func : depthwise_conv2d_grad_grad
+    use_gpudnn : true
   optional : grad_input_grad, grad_filter_grad
 
 - backward_api : depthwise_conv2d_transpose_grad
......
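With the backward yaml now pointing at `depthwise_conv2d_grad_grad` instead of `conv2d_grad_grad` (and `fuse_relu` threaded through the args), a second-order backward pass exercises the fixed path. A minimal sketch, assuming a GPU build of Paddle; shapes are illustrative.

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 4, 16, 16])
w = paddle.randn([4, 1, 3, 3])
x.stop_gradient = False
w.stop_gradient = False

y = F.conv2d(x, w, padding=1, groups=4)       # depthwise: groups == channels
(dx,) = paddle.grad(y, x, create_graph=True)  # first-order grad keeps the graph
(ddw,) = paddle.grad(dx.sum(), w)             # second-order grad hits *_grad_grad
print(ddw.shape)                              # [4, 1, 3, 3]
```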