Commit 1eb96eec (unverified)
Author: From00 · Committed via GitHub on Mar 21, 2022
Parent: b77e20ac

Move conv-transpose OPs to phi (#40675)

* Move conv-transpose OPs to phi
* Fix CI errors
* Fix CI errors

Showing 24 changed files with 3,294 additions and 2,275 deletions
paddle/fluid/framework/ir/mkldnn/mkldnn_conv_bn_fuse_pass_tester.cc  +1 −1
paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc  +1 −1
paddle/fluid/operators/conv_transpose_cudnn_op.cu  +0 −1286
paddle/fluid/operators/conv_transpose_op.cc  +45 −231
paddle/fluid/operators/conv_transpose_op.cu  +0 −185
paddle/fluid/operators/conv_transpose_op.h  +5 −518
paddle/fluid/operators/conv_transpose_op_npu.cc  +8 −4
paddle/fluid/operators/conv_transpose_op_xpu.cc  +11 −4
paddle/phi/infermeta/backward.cc  +39 −0
paddle/phi/infermeta/backward.h  +31 −0
paddle/phi/infermeta/binary.cc  +237 −45
paddle/phi/infermeta/binary.h  +13 −0
paddle/phi/kernels/conv_transpose_grad_kernel.h  +90 −0
paddle/phi/kernels/conv_transpose_kernel.h  +65 −0
paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc  +70 −0
paddle/phi/kernels/cpu/conv_transpose_kernel.cc  +66 −0
paddle/phi/kernels/funcs/slice.h  +51 −0
paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu  +157 −0
paddle/phi/kernels/gpu/conv_transpose_kernel.cu  +118 −0
paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu  +1122 −0
paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu  +381 −0
paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h  +364 −0
paddle/phi/kernels/impl/conv_transpose_kernel_impl.h  +278 −0
paddle/phi/ops/compat/conv_transpose_sig.cc  +141 −0
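The new phi files only appear in the list above; their contents are not shown in the diffs below. As orientation, here is a hedged sketch of what a kernel declaration in the added paddle/phi/kernels/conv_transpose_kernel.h typically looks like — the parameter list is reconstructed from the phi kernel conventions of this period, not copied from this commit:

// Sketch only: phi kernels are free functions templated on data type and
// device context, replacing the framework::OpKernel<T>::Compute interface
// used by the deleted fluid kernels below. The exact parameters in this
// commit may differ.
template <typename T, typename Context>
void Conv2dTransposeKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& filter,
                           const std::vector<int>& strides,
                           const std::vector<int>& paddings,
                           const std::vector<int>& output_padding,
                           const std::vector<int>& output_size,
                           const std::string& padding_algorithm,
                           int groups,
                           const std::vector<int>& dilations,
                           const std::string& data_format,
                           DenseTensor* out);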
paddle/fluid/framework/ir/mkldnn/mkldnn_conv_bn_fuse_pass_tester.cc
@@ -28,7 +28,7 @@
 USE_OP_ITSELF(batch_norm);
 USE_OP_DEVICE_KERNEL(batch_norm, MKLDNN);
-USE_OP(conv2d_transpose);
+USE_OP_ITSELF(conv2d_transpose);
 USE_OP_DEVICE_KERNEL(conv2d_transpose, MKLDNN);
 USE_OP_ITSELF(elementwise_add);
 USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN);
paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "paddle/fluid/inference/tensorrt/convert/ut_helper.h"

 USE_OP_ITSELF(conv2d);
-USE_OP(conv2d_transpose);
+USE_OP_ITSELF(conv2d_transpose);

 namespace paddle {
 namespace inference {
paddle/fluid/operators/conv_transpose_cudnn_op.cu
deleted file mode 100644 (previous blob: b77e20ac)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#endif
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/padding.h"
namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

template <typename T, int D>
static void DataTranspose(const framework::ExecutionContext& ctx,
                          const Tensor* input, Tensor* output,
                          const std::vector<int>& axis, int flag = 0) {
  auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
  phi::funcs::Transpose<platform::CUDADeviceContext, T, D> transpose;
  auto in_dims = input->dims();
  std::vector<int64_t> input_transpose_vec;
  for (size_t i = 0; i < axis.size(); ++i) {
    if (flag == 0)
      input_transpose_vec.push_back(in_dims[axis[i]]);
    else
      input_transpose_vec.push_back(in_dims[i]);
  }
  framework::DDim input_transpose_dims(phi::make_ddim(input_transpose_vec));
  output->mutable_data<T>(input_transpose_dims, ctx.GetPlace());
  transpose(dev_ctx, *input, output, axis);
}
template <typename T>
class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      paddle::platform::errors::PreconditionNotMet(
                          "It must use CUDAPlace."));
    auto* input = ctx.Input<Tensor>("Input");
    auto* filter = ctx.Input<Tensor>("Filter");
    auto* output = ctx.Output<Tensor>("Output");

    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    // cudnn v5 does not support dilations
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");
    const T* filter_data = filter->data<T>();
    const std::string data_layout_str = ctx.Attr<std::string>("data_format");
    const paddle::platform::DataLayout data_layout =
        (data_layout_str != "NHWC" ? platform::DataLayout::kNCHW
                                   : platform::DataLayout::kNHWC);

    // if channel_last, transpose to channel_first
    Tensor input_transpose;
    std::vector<int> input_vec = phi::vectorize<int>(input->dims());
    std::vector<int> output_vec = phi::vectorize<int>(output->dims());
    if (data_layout == platform::DataLayout::kNHWC) {
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 3, 1, 2};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output->dims()[axis[i]];
        }
        DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 4, 1, 2, 3};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output->dims()[axis[i]];
        }
        DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
      }
    } else {
      input_transpose = *input;
    }

    // update padding and dilation
    auto in_dims = input_transpose.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim);

    std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
    Tensor transformed_input;
    std::vector<int> padding_common(data_dim, 0);
    if (!is_sys_pad) {
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = input_transpose.dims()[0];
      new_input_shape_vec[1] = input_transpose.dims()[1];

      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            input_transpose.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(phi::make_ddim(new_input_shape_vec));
      transformed_input.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();

      transformed_input =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      const int rank = input_transpose.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              dev_ctx, input_pad, input_transpose, pad_value,
              &transformed_input);
        } break;
        case 5: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              dev_ctx, input_pad, input_transpose, pad_value,
              &transformed_input);
        } break;
        default:
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
      }
    } else {
      transformed_input = input_transpose;
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }

    std::vector<int64_t> starts(data_dim, 0);
    std::vector<int64_t> ends(data_dim, 0);
    std::vector<int64_t> axes(data_dim, 0);
    for (size_t i = 0; i < data_dim; ++i) {
      starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
      ends[i] = starts[i] + output_vec[i + 2];
      axes[i] = i + 2;
    }

    const T* input_data = transformed_input.data<T>();
    input_vec = phi::vectorize<int>(transformed_input.dims());
    std::vector<int> transformed_output_vec = output_vec;
    for (size_t i = 0; i < data_dim; ++i) {
      transformed_output_vec[i + 2] =
          output_vec[i + 2] +
          (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
          2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1];
    }

    Tensor transformed_output;
    if (!is_sys_pad) {
      DDim transformed_output_shape(phi::make_ddim(transformed_output_vec));
      transformed_output.mutable_data<T>(transformed_output_shape,
                                         ctx.GetPlace());
    } else {
      output->mutable_data<T>(ctx.GetPlace());
      transformed_output.ShareDataWith(*output);
      transformed_output.Resize(phi::make_ddim(transformed_output_vec));
    }
    T* transformed_output_data = transformed_output.data<T>();

    platform::DataLayout layout;

    int iwo_groups = groups;
    int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
    iwo_groups = 1;
    c_groups = groups;
    groups = 1;
#endif

    if (strides.size() == 2U) {
      layout = platform::DataLayout::kNCHW;
    } else {
      layout = platform::DataLayout::kNCDHW;
    }

    size_t workspace_size = 0;
#ifdef PADDLE_WITH_HIP
    miopenConvBwdDataAlgorithm_t algo{};
#else
    cudnnConvolutionBwdDataAlgo_t algo{};
#endif
    // ------------------- cudnn conv algorithm ---------------------
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto handle = dev_ctx.cudnn_handle();
    auto layout_tensor = GetCudnnTensorFormat(layout);
    bool deterministic = FLAGS_cudnn_deterministic;

    auto dtype = platform::CudnnDataType<T>::type;
    // ------------------- cudnn descriptors ---------------------
    ConvArgs args{&transformed_output,
                  filter,
                  &transformed_input,
                  strides,
                  padding_common,
                  dilations,
                  dtype};
    args.handle = handle;
    args.idesc.set(transformed_output, iwo_groups);
    args.wdesc.set(*filter, layout_tensor, iwo_groups);
    args.odesc.set(transformed_input, iwo_groups);
    args.cdesc.set(dtype, padding_common, strides, dilations,
                   platform::AllowTF32Cudnn(), c_groups);

#ifdef PADDLE_WITH_HIP
    using search = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
    workspace_size = std::max(workspace_size, search::GetWorkspaceSize(args));
    algo = search::Find<T>(
        args, false, deterministic, workspace_size,
        ctx.template device_context<platform::CUDADeviceContext>());
#else
    using search = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
    algo = search::Find<T>(
        args, false, deterministic,
        ctx.template device_context<platform::CUDADeviceContext>());
    workspace_size =
        std::max(workspace_size, search::GetWorkspaceSize(args, algo));
#endif

    // ------------------- cudnn conv transpose forward ---------------------
    int input_offset =
        transformed_input.numel() / transformed_input.dims()[0] / groups;
    int output_offset =
        transformed_output.numel() / transformed_output.dims()[0] / groups;
    int filter_offset = filter->numel() / groups;
    ScalingParamType<T> alpha = 1.0f;
    ScalingParamType<T> beta = 0.0f;
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
      auto cudnn_func = [&](void* cudnn_workspace) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            platform::dynload::miopenConvolutionBackwardData(
                handle, &alpha, args.odesc.desc(),
                input_data + input_offset * g, args.wdesc.desc(),
                filter_data + filter_offset * g, args.cdesc.desc(), algo,
                &beta, args.idesc.desc(),
                transformed_output_data + output_offset * g, cudnn_workspace,
                workspace_size));
      };
#else   // PADDLE_WITH_HIP
      auto cudnn_func = [&](void* cudnn_workspace) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            platform::dynload::cudnnConvolutionBackwardData(
                handle, &alpha, args.wdesc.desc(),
                filter_data + filter_offset * g, args.odesc.desc(),
                input_data + input_offset * g, args.cdesc.desc(), algo,
                cudnn_workspace, workspace_size, &beta, args.idesc.desc(),
                transformed_output_data + output_offset * g));
      };
#endif  // PADDLE_WITH_HIP
      workspace_handle.RunFunc(cudnn_func, workspace_size);
    }
    if (!is_sys_pad && strides.size() == 2U) {
      Slice<paddle::platform::CUDADeviceContext, T, 4>(
          ctx, &transformed_output, output, starts, ends, axes);
    } else if (!is_sys_pad && strides.size() == 3U) {
      Slice<paddle::platform::CUDADeviceContext, T, 5>(
          ctx, &transformed_output, output, starts, ends, axes);
    }

    if (data_layout == platform::DataLayout::kNHWC) {
      Tensor output_transpose;
      Tensor output_nchw;
      output_nchw.ShareDataWith(*output);
      output_nchw.Resize(phi::make_ddim(output_vec));
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 2, 3, 1};
        DataTranspose<T, 4>(ctx, &output_nchw, &output_transpose, axis);
        *output = output_transpose;
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 2, 3, 4, 1};
        DataTranspose<T, 5>(ctx, &output_nchw, &output_transpose, axis);
        *output = output_transpose;
      }
    }
  }
};
template <typename T>
class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      paddle::platform::errors::PreconditionNotMet(
                          "It must use CUDAPlace."));
    auto input = ctx.Input<Tensor>("Input");
    auto filter = ctx.Input<Tensor>("Filter");
    auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
    auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
    auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
    const T* filter_data = filter->data<T>();

    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    // cudnn v5 does not support dilations
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    int user_workspace_size = ctx.Attr<int>("workspace_size_MB");
    const std::string data_layout_str = ctx.Attr<std::string>("data_format");
    const paddle::platform::DataLayout data_layout =
        (data_layout_str != "NHWC" ? platform::DataLayout::kNCHW
                                   : platform::DataLayout::kNHWC);

    // if channel_last, transpose to channel_first
    Tensor input_transpose;
    Tensor output_grad_transpose;
    std::vector<int> input_vec = phi::vectorize<int>(input->dims());
    std::vector<int> output_vec = phi::vectorize<int>(output_grad->dims());
    if (data_layout == platform::DataLayout::kNHWC) {
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 3, 1, 2};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output_grad->dims()[axis[i]];
        }
        DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
        DataTranspose<T, 4>(ctx, output_grad, &output_grad_transpose, axis);
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 4, 1, 2, 3};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output_grad->dims()[axis[i]];
        }
        DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
        DataTranspose<T, 5>(ctx, output_grad, &output_grad_transpose, axis);
      }
    } else {
      input_transpose = *input;
      output_grad_transpose = *output_grad;
    }

    // update padding and dilation
    auto in_dims = input_transpose.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim);

    std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
    Tensor transformed_output_grad;
    std::vector<int> padding_common(data_dim, 0);
    if (!is_sys_pad) {
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_output_grad_shape_vec(data_dim + 2);
      new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0];
      new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1];

      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_output_grad_shape_vec[i + 2] =
            output_grad_transpose.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_output_grad_shape(
          phi::make_ddim(new_output_grad_shape_vec));
      transformed_output_grad.Resize(new_output_grad_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();

      transformed_output_grad =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_output_grad_shape, dev_ctx);
      const int rank = input_transpose.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              dev_ctx, input_pad, output_grad_transpose, pad_value,
              &transformed_output_grad);
        } break;
        case 5: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              dev_ctx, input_pad, output_grad_transpose, pad_value,
              &transformed_output_grad);
        } break;
        default:
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
      }
    } else {
      transformed_output_grad = output_grad_transpose;
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }

    const T* input_data = input_transpose.data<T>();
    const T* output_grad_data = transformed_output_grad.data<T>();
    output_vec = phi::vectorize<int>(transformed_output_grad.dims());

    // ------------------- cudnn descriptors ---------------------
    platform::DataLayout layout;

    if (strides.size() == 2U) {
      layout = platform::DataLayout::kNCHW;
    } else {
      layout = platform::DataLayout::kNCDHW;
    }

    int iwo_groups = groups;
    int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
    iwo_groups = 1;
    c_groups = groups;
    groups = 1;
#endif

    auto dtype = platform::CudnnDataType<T>::type;

    ConvArgs args1{&transformed_output_grad,
                   filter,
                   &input_transpose,
                   strides,
                   padding_common,
                   dilations,
                   dtype};
    ConvArgs args2{&transformed_output_grad,
                   filter,
                   &input_transpose,
                   strides,
                   padding_common,
                   dilations,
                   dtype};

#ifdef PADDLE_WITH_HIP
    miopenConvFwdAlgorithm_t data_algo{};
    miopenConvBwdWeightsAlgorithm_t filter_algo{};
#else
    cudnnConvolutionFwdAlgo_t data_algo{};
    cudnnConvolutionBwdFilterAlgo_t filter_algo{};
#endif

    auto layout_tensor = GetCudnnTensorFormat(layout);
    size_t workspace_size = 0;
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto handle = dev_ctx.cudnn_handle();
    bool deterministic = FLAGS_cudnn_deterministic;
    T* input_grad_data = nullptr;
    T* filter_grad_data = nullptr;
    if (input_grad) {
      input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
      args1.handle = handle;
      args1.idesc.set(transformed_output_grad, iwo_groups);
      args1.wdesc.set(*filter, layout_tensor, iwo_groups);
      args1.odesc.set(input_transpose, iwo_groups);
      args1.cdesc.set(dtype, padding_common, strides, dilations,
                      platform::AllowTF32Cudnn(), c_groups);
#ifdef PADDLE_WITH_HIP
      using search1 = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
      workspace_size =
          std::max(workspace_size, search1::GetWorkspaceSize(args1));
      data_algo = search1::Find<T>(
          args1, false, deterministic, workspace_size,
          ctx.template device_context<platform::CUDADeviceContext>());
#else
      using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
      data_algo = search1::Find<T>(
          args1, false, deterministic,
          ctx.template device_context<platform::CUDADeviceContext>());
      workspace_size =
          std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
#endif
    }

    if (filter_grad) {
      filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
      args2.handle = handle;
      args2.idesc.set(transformed_output_grad, iwo_groups);
      args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups);
      args2.odesc.set(input_transpose, iwo_groups);
      args2.cdesc.set(dtype, padding_common, strides, dilations,
                      platform::AllowTF32Cudnn(), c_groups);
#ifdef PADDLE_WITH_HIP
      using search2 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
      workspace_size =
          std::max(workspace_size, search2::GetWorkspaceSize(args2));
      filter_algo = search2::Find<T>(
          args2, false, deterministic, workspace_size,
          ctx.template device_context<platform::CUDADeviceContext>());
#else
      using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
      filter_algo = search2::Find<T>(
          args2, false, deterministic,
          ctx.template device_context<platform::CUDADeviceContext>());
      workspace_size = std::max(workspace_size,
                                search2::GetWorkspaceSize(args2, filter_algo));
#endif
    }

    // ------------------- cudnn conv backward data ---------------------
    // FIXME(typhoonzero): template type T may not be the same as cudnn call.
    int input_offset = input->numel() / input->dims()[0] / groups;
    int output_grad_offset = transformed_output_grad.numel() /
                             transformed_output_grad.dims()[0] / groups;
    int filter_offset = filter->numel() / groups;
    ScalingParamType<T> alpha = 1.0f;
    ScalingParamType<T> beta = 0.0f;
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    if (input_grad) {
      // Because beta is zero, it is unnecessary to reset input_grad.
      for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
        auto cudnn_func = [&](void* cudnn_workspace) {
          PADDLE_ENFORCE_GPU_SUCCESS(
              platform::dynload::miopenConvolutionForward(
                  handle, &alpha, args1.idesc.desc(),
                  output_grad_data + output_grad_offset * g,
                  args1.wdesc.desc(), filter_data + filter_offset * g,
                  args1.cdesc.desc(), data_algo, &beta, args1.odesc.desc(),
                  input_grad_data + input_offset * g, cudnn_workspace,
                  workspace_size));
        };
#else   // PADDLE_WITH_HIP
        auto cudnn_func = [&](void* cudnn_workspace) {
          PADDLE_ENFORCE_GPU_SUCCESS(
              platform::dynload::cudnnConvolutionForward(
                  handle, &alpha, args1.idesc.desc(),
                  output_grad_data + output_grad_offset * g,
                  args1.wdesc.desc(), filter_data + filter_offset * g,
                  args1.cdesc.desc(), data_algo, cudnn_workspace,
                  workspace_size, &beta, args1.odesc.desc(),
                  input_grad_data + input_offset * g));
        };
#endif  // PADDLE_WITH_HIP
        workspace_handle.RunFunc(cudnn_func, workspace_size);
      }

      if (data_layout == platform::DataLayout::kNHWC) {
        Tensor input_grad_transpose;
        Tensor input_grad_nchw;
        input_grad_nchw.ShareDataWith(*input_grad);
        input_grad_nchw.Resize(phi::make_ddim(input_vec));
        if (strides.size() == 2U) {
          std::vector<int> axis = {0, 2, 3, 1};
          DataTranspose<T, 4>(ctx, &input_grad_nchw, &input_grad_transpose,
                              axis);
          *input_grad = input_grad_transpose;
        } else if (strides.size() == 3U) {
          std::vector<int> axis = {0, 2, 3, 4, 1};
          DataTranspose<T, 5>(ctx, &input_grad_nchw, &input_grad_transpose,
                              axis);
          *input_grad = input_grad_transpose;
        }
      }
    }

    // ------------------- cudnn conv backward filter ---------------------
    if (filter_grad) {
      // Because beta is zero, it is unnecessary to reset filter_grad.
      // Gradient with respect to the filter
      for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
        auto cudnn_func = [&](void* cudnn_workspace) {
          PADDLE_ENFORCE_GPU_SUCCESS(
              platform::dynload::miopenConvolutionBackwardWeights(
                  handle, &alpha, args2.odesc.desc(),
                  input_data + input_offset * g, args2.idesc.desc(),
                  output_grad_data + output_grad_offset * g,
                  args2.cdesc.desc(), filter_algo, &beta, args2.wdesc.desc(),
                  filter_grad_data + filter_offset * g, cudnn_workspace,
                  workspace_size));
        };
#else   // PADDLE_WITH_HIP
        auto cudnn_func = [&](void* cudnn_workspace) {
          PADDLE_ENFORCE_GPU_SUCCESS(
              platform::dynload::cudnnConvolutionBackwardFilter(
                  handle, &alpha, args2.idesc.desc(),
                  output_grad_data + output_grad_offset * g,
                  args2.odesc.desc(), input_data + input_offset * g,
                  args2.cdesc.desc(), filter_algo, cudnn_workspace,
                  workspace_size, &beta, args2.wdesc.desc(),
                  filter_grad_data + filter_offset * g));
        };
#endif  // PADDLE_WITH_HIP
        workspace_handle.RunFunc(cudnn_func, workspace_size);
      }
    }
  }
};
/*
 * Inputs:  I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 * ddo = conv_bp_data(W, ddI) + conv_bp_data(ddW, I)
 * dW = conv_bp_filter(dO, ddI)
 * dI = conv(dO, ddW)
 */
template <typename T>
class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      paddle::platform::errors::PreconditionNotMet(
                          "It must use CUDAPlace."));
    auto X = ctx.Input<Tensor>("Input");
    auto W = ctx.Input<Tensor>("Filter");
    auto dO = ctx.Input<Tensor>("DOutput");
    auto ddX = ctx.Input<Tensor>("DDInput");
    auto ddW = ctx.Input<Tensor>("DDFilter");

    auto ddO = ctx.Output<Tensor>("DDOutput");
    auto dW = ctx.Output<Tensor>("DFilter");
    auto dX = ctx.Output<Tensor>("DInput");

    if (ddO) {
      ddO->mutable_data<T>(ctx.GetPlace());
      phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_zero;
      set_zero(dev_ctx, ddO, static_cast<T>(0));
    }
    if (dW) {
      dW->mutable_data<T>(ctx.GetPlace());
    }
    if (dX) {
      dX->mutable_data<T>(ctx.GetPlace());
    }

    const T* dy = dO->data<T>();
    const T* w = W->data<T>();

    const T* ddx = nullptr;
    const T* ddw = nullptr;
    T *dw, *dx, *ddy;
    dw = dx = ddy = nullptr;
    T* transformed_dx = nullptr;
    const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");

    bool deterministic = FLAGS_cudnn_deterministic;

    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");

    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    const std::string data_format = ctx.Attr<std::string>("data_format");
    const bool channel_last =
        (data_format == "NHWC" || data_format == "NDHWC");

    // transform Tensors to channel first-----------
    Tensor transformed_X_channel(X->type());
    Tensor transformed_dO_channel(dO->type());
    Tensor transformed_ddX_channel(X->type());

    Tensor transformed_ddO_channel(dO->type());
    Tensor transformed_dX_channel(X->type());

    if (channel_last) {
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, X, &transformed_X_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, X, &transformed_X_channel);

      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, dO, &transformed_dO_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, dO, &transformed_dO_channel);

      if (ddX) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, ddX, &transformed_ddX_channel);
        TransToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, ddX, &transformed_ddX_channel);
      }

      if (ddO) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, ddO, &transformed_ddO_channel);
      }
      if (dX) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, dX, &transformed_dX_channel);
        transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
      }
    } else {
      transformed_X_channel = *X;
      transformed_dO_channel = *dO;
      if (ddX) {
        transformed_ddX_channel = *ddX;
      }
      if (dX) {
        transformed_dX_channel = *dX;
      }
    }
    std::vector<int> output_vec =
        phi::vectorize<int>(transformed_dO_channel.dims());

    auto in_dims = transformed_X_channel.dims();
    auto filter_dims = W->dims();
    framework::DDim in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim);
    Tensor transformed_X(X->type());
    Tensor transformed_ddX(X->type());

    Tensor transformed_dO(dO->type());

    std::vector<int> padding_common(data_dim, 0);
    std::vector<int> input_pad(X->dims().size() * 2, 0);

    if (!is_sys_pad) {
      // get pad
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      std::vector<int> new_output_grad_shape_vec(data_dim + 2);

      new_input_shape_vec[0] = transformed_X_channel.dims()[0];
      new_input_shape_vec[1] = transformed_X_channel.dims()[1];

      new_output_grad_shape_vec[0] = transformed_dO_channel.dims()[0];
      new_output_grad_shape_vec[1] = transformed_dO_channel.dims()[1];

      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            transformed_X_channel.dims()[i + 2] + padding_diff[i];

        new_output_grad_shape_vec[i + 2] =
            transformed_dO_channel.dims()[i + 2] + padding_diff[i];

        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(phi::make_ddim(new_input_shape_vec));
      transformed_X.Resize(new_input_shape);
      transformed_ddX.Resize(new_input_shape);

      framework::DDim new_output_grad_shape(
          phi::make_ddim(new_output_grad_shape_vec));
      transformed_dO.Resize(new_output_grad_shape);

      transformed_dO =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_output_grad_shape, dev_ctx);

      transformed_X =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      if (ddX) {
        transformed_ddX =
            ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
                new_input_shape, dev_ctx);
      }

      // pad for input
      const int rank = X->dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              dev_ctx, input_pad, transformed_X_channel, pad_value,
              &transformed_X);
          if (dO) {
            phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
                dev_ctx, input_pad, transformed_dO_channel, pad_value,
                &transformed_dO);
          }

          if (ddX) {
            phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
                dev_ctx, input_pad, transformed_ddX_channel, pad_value,
                &transformed_ddX);
          }
        } break;
        case 5: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              dev_ctx, input_pad, transformed_X_channel, pad_value,
              &transformed_X);
          if (ddX) {
            phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
                dev_ctx, input_pad, transformed_ddX_channel, pad_value,
                &transformed_ddX);
          }
        } break;
        default:
          PADDLE_THROW(platform::errors::InvalidArgument(
              "ConvOp only support tensors with 4 or 5 dimensions."));
      }
    } else {
      transformed_X = transformed_X_channel;
      transformed_dO = transformed_dO_channel;
      if (ddX) {
        transformed_ddX = transformed_ddX_channel;
      }

      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }

    std::vector<int64_t> starts(data_dim, 0);
    std::vector<int64_t> ends(data_dim, 0);
    std::vector<int64_t> axes(data_dim, 0);
    for (size_t i = 0; i < data_dim; ++i) {
      starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
      ends[i] = starts[i] + output_vec[i + 2];
      axes[i] = i + 2;
    }

    std::vector<int> transformed_output_vec = output_vec;
    for (size_t i = 0; i < data_dim; ++i) {
      transformed_output_vec[i + 2] =
          output_vec[i + 2] +
          (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
          2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1];
    }

    if (!is_sys_pad) {
      DDim transformed_output_shape(phi::make_ddim(transformed_output_vec));
      transformed_ddO_channel.mutable_data<T>(transformed_output_shape,
                                              ctx.GetPlace());
    } else {
      ddO->mutable_data<T>(ctx.GetPlace());
      transformed_ddO_channel = *ddO;
      transformed_ddO_channel.Resize(phi::make_ddim(transformed_output_vec));
    }

    const T* x = transformed_X.data<T>();

    int iwo_group = groups;
    int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
    iwo_group = 1;
    c_group = groups;
    groups = 1;
#endif
    auto dtype = platform::CudnnDataType<T>::type;

    auto handle = dev_ctx.cudnn_handle();

    ConvArgs args1{&transformed_ddO_channel,
                   W,
                   &transformed_ddX,
                   strides,
                   padding_common,
                   dilations,
                   dtype};
    ConvArgs args2{&transformed_ddO_channel,
                   ddW,
                   &transformed_X,
                   strides,
                   padding_common,
                   dilations,
                   dtype};

    ConvArgs args3{&transformed_dO,
                   dW,
                   &transformed_ddX_channel,
                   strides,
                   padding_common,
                   dilations,
                   dtype};
    ConvArgs args4{&transformed_dO,
                   ddW,
                   &transformed_dX_channel,
                   strides,
                   padding_common,
                   dilations,
                   dtype};

#ifdef PADDLE_WITH_HIP
    miopenConvBwdDataAlgorithm_t bwd_algo1 =
        static_cast<miopenConvBwdDataAlgorithm_t>(0);
    miopenConvBwdDataAlgorithm_t bwd_algo2 =
        static_cast<miopenConvBwdDataAlgorithm_t>(0);
    miopenConvFwdAlgorithm_t data_algo =
        static_cast<miopenConvFwdAlgorithm_t>(0);
    miopenConvBwdWeightsAlgorithm_t filter_algo =
        static_cast<miopenConvBwdWeightsAlgorithm_t>(0);
#else
    cudnnConvolutionBwdDataAlgo_t bwd_algo1 =
        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
    cudnnConvolutionBwdDataAlgo_t bwd_algo2 =
        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
    cudnnConvolutionFwdAlgo_t data_algo =
        static_cast<cudnnConvolutionFwdAlgo_t>(0);
    cudnnConvolutionBwdFilterAlgo_t filter_algo =
        static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
#endif

    auto layout = GetCudnnTensorFormat(platform::DataLayout::kNCHW);

    // ddo = conv(ddI, W) + conv(I, ddW)
    size_t workspace_size = 0;

    T* transformed_ddy_channel = nullptr;

    if (ddO) {
      ddy = ddO->data<T>();
      transformed_ddy_channel = transformed_ddO_channel.data<T>();
      if (ddX) {
        args1.handle = handle;
        args1.idesc.set(transformed_ddO_channel, iwo_group);
        args1.wdesc.set(*W, layout, iwo_group);
        args1.odesc.set(transformed_ddX, iwo_group);
        args1.cdesc.set(dtype, padding_common, strides, dilations,
                        platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
        using search1 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
        workspace_size = search1::GetWorkspaceSize(args1);
        bwd_algo1 = search1::Find<T>(
            args1, false, deterministic, workspace_size,
            ctx.template device_context<platform::CUDADeviceContext>());
#else
        using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
        bwd_algo1 = search1::Find<T>(
            args1, false, deterministic,
            ctx.template device_context<platform::CUDADeviceContext>());
        workspace_size = search1::GetWorkspaceSize(args1, bwd_algo1);
#endif
      }

      if (ddW) {
        ddw = ddW->data<T>();
        args2.handle = handle;
        args2.idesc.set(transformed_ddO_channel, iwo_group);
        args2.wdesc.set(*ddW, layout, iwo_group);
        args2.odesc.set(transformed_X, iwo_group);
        args2.cdesc.set(dtype, padding_common, strides, dilations,
                        platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
        using search2 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
        workspace_size =
            std::max(workspace_size, search2::GetWorkspaceSize(args2));
        bwd_algo2 = search2::Find<T>(
            args2, false, deterministic, workspace_size,
            ctx.template device_context<platform::CUDADeviceContext>());
#else
        using search2 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
        bwd_algo2 = search2::Find<T>(
            args2, false, deterministic,
            ctx.template device_context<platform::CUDADeviceContext>());
        workspace_size = std::max(
            workspace_size, search2::GetWorkspaceSize(args2, bwd_algo2));
#endif
      }
    }

    if (dW && ddX) {
      dw = dW->data<T>();
      args3.handle = handle;
      args3.idesc.set(transformed_dO, iwo_group);
      args3.wdesc.set(*dW, layout, iwo_group);
      args3.odesc.set(transformed_ddX_channel, iwo_group);
      args3.cdesc.set(dtype, padding_common, strides, dilations,
                      platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
      using search3 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
      workspace_size =
          std::max(workspace_size, search3::GetWorkspaceSize(args3));
      filter_algo = search3::Find<T>(
          args3, false, deterministic, workspace_size,
          ctx.template device_context<platform::CUDADeviceContext>());
#else
      using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
      filter_algo = search3::Find<T>(
          args3, false, deterministic,
          ctx.template device_context<platform::CUDADeviceContext>());
      workspace_size = std::max(workspace_size,
                                search3::GetWorkspaceSize(args3, filter_algo));
#endif
    }

    if (ddW && dX) {
      transformed_dx = transformed_dX_channel.data<T>();

      args4.handle = handle;
      args4.idesc.set(transformed_dO, iwo_group);
      args4.wdesc.set(*ddW, layout, iwo_group);
      args4.odesc.set(transformed_dX_channel, iwo_group);
      args4.cdesc.set(dtype, padding_common, strides, dilations,
                      platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
      using search4 = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
      workspace_size =
          std::max(workspace_size, search4::GetWorkspaceSize(args4));
      data_algo = search4::Find<T>(
          args4, false, deterministic, workspace_size,
          ctx.template device_context<platform::CUDADeviceContext>());
#else
      using search4 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
      data_algo = search4::Find<T>(
          args4, false, deterministic,
          ctx.template device_context<platform::CUDADeviceContext>());
      workspace_size =
          std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
#endif
    }

    int i_n, i_c, i_d, i_h, i_w;
    GetNCDHW(transformed_X.dims(), platform::DataLayout::kNCHW, &i_n, &i_c,
             &i_d, &i_h, &i_w);

    int o_n, o_c, o_d, o_h, o_w;
    GetNCDHW(transformed_dO.dims(), platform::DataLayout::kNCHW, &o_n, &o_c,
             &o_d, &o_h, &o_w);

    int group_offset_in =
        transformed_X.numel() / transformed_X.dims()[0] / groups;
    int group_offset_out =
        transformed_dO.numel() / transformed_dO.dims()[0] / groups;
    int group_offset_filter = W->numel() / groups;

    ScalingParamType<T> alpha = 1.0f;
    ScalingParamType<T> beta = 0.0f;

    auto wkspace_handle = dev_ctx.cudnn_workspace_handle();

    if (ddO) {
      if (ddX) {
        ddx = transformed_ddX.data<T>();
        for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                PADDLE_ENFORCE_GPU_SUCCESS(
                    platform::dynload::miopenConvolutionBackwardData(
                        handle, &alpha, args1.odesc.desc(),
                        ddx + i * group_offset_in, args1.wdesc.desc(),
                        w + i * group_offset_filter, args1.cdesc.desc(),
                        bwd_algo1, &beta, args1.idesc.desc(),
                        transformed_ddy_channel + i * group_offset_out,
                        workspace_ptr, workspace_size));
              },
              workspace_size);
#else   // PADDLE_WITH_HIP
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                PADDLE_ENFORCE_GPU_SUCCESS(
                    platform::dynload::cudnnConvolutionBackwardData(
                        handle, &alpha, args1.wdesc.desc(),
                        w + i * group_offset_filter, args1.odesc.desc(),
                        ddx + i * group_offset_in, args1.cdesc.desc(),
                        bwd_algo1, workspace_ptr, workspace_size, &beta,
                        args1.idesc.desc(),
                        transformed_ddy_channel + i * group_offset_out));
              },
              workspace_size);
#endif  // PADDLE_WITH_HIP
        }
      }
      if (ddW) {
        for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
          // MIOPEN ONLY support beta to be 0.0f
          Tensor conv_x_ddw(dO->type());
          conv_x_ddw.Resize(transformed_ddO_channel.dims());
          T* conv_x_ddw_data = conv_x_ddw.mutable_data<T>(ctx.GetPlace());
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                PADDLE_ENFORCE_GPU_SUCCESS(
                    platform::dynload::miopenConvolutionBackwardData(
                        handle, &alpha, args2.odesc.desc(),
                        x + i * group_offset_in, args2.wdesc.desc(),
                        ddw + i * group_offset_filter, args2.cdesc.desc(),
                        bwd_algo2, &beta, args2.idesc.desc(),
                        conv_x_ddw_data + i * group_offset_out, workspace_ptr,
                        workspace_size));
              },
              workspace_size);
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenOpTensor(
              handle, miopenTensorOpAdd, &alpha, args2.idesc.desc(),
              transformed_ddy_channel + i * group_offset_out, &alpha,
              args2.idesc.desc(), conv_x_ddw_data + i * group_offset_out,
              &beta, args2.idesc.desc(),
              transformed_ddy_channel + i * group_offset_out));
#else   // PADDLE_WITH_HIP
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                PADDLE_ENFORCE_GPU_SUCCESS(
                    platform::dynload::cudnnConvolutionBackwardData(
                        handle, &alpha, args2.wdesc.desc(),
                        ddw + i * group_offset_filter, args2.odesc.desc(),
                        x + i * group_offset_in, args2.cdesc.desc(),
                        bwd_algo2, workspace_ptr, workspace_size, &alpha,
                        args2.idesc.desc(),
                        transformed_ddy_channel + i * group_offset_out));
              },
              workspace_size);
#endif  // PADDLE_WITH_HIP
        }
      }
      if ((!is_sys_pad) && (!channel_last)) {
        if (strides.size() == 2U) {
          Slice<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, &transformed_ddO_channel, ddO, starts, ends, axes);
        } else if (!is_sys_pad && strides.size() == 3U) {
          Slice<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, &transformed_ddO_channel, ddO, starts, ends, axes);
        }
      } else if ((!is_sys_pad) && (channel_last)) {
        if (strides.size() == 2U) {
          Slice<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, &transformed_ddO_channel, &transformed_ddO_channel, starts,
              ends, axes);
        } else if (!is_sys_pad && strides.size() == 3U) {
          Slice<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, &transformed_ddO_channel, &transformed_ddO_channel, starts,
              ends, axes);
        }

        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_ddO_channel, ddO);
      }
    }

    T* transformed_dy_channel = transformed_dO.data<T>();
    if (dW && ddX) {
      ddx = transformed_ddX_channel.data<T>();
      for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              PADDLE_ENFORCE_GPU_SUCCESS(
                  platform::dynload::miopenConvolutionBackwardWeights(
                      handle, &alpha, args3.odesc.desc(),
                      ddx + i * group_offset_in, args3.idesc.desc(),
                      transformed_dy_channel + i * group_offset_out,
                      args3.cdesc.desc(), filter_algo, &beta,
                      args3.wdesc.desc(), dw + i * group_offset_filter,
                      workspace_ptr, workspace_size));
            },
            workspace_size);
#else   // PADDLE_WITH_HIP
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              PADDLE_ENFORCE_GPU_SUCCESS(
                  platform::dynload::cudnnConvolutionBackwardFilter(
                      handle, &alpha, args3.idesc.desc(),
                      transformed_dy_channel + i * group_offset_out,
                      args3.odesc.desc(), ddx + i * group_offset_in,
                      args3.cdesc.desc(), filter_algo, workspace_ptr,
                      workspace_size, &beta, args3.wdesc.desc(),
                      dw + i * group_offset_filter));
            },
            workspace_size);
#endif  // PADDLE_WITH_HIP
      }
    }

    if (dX && ddW) {
      ddw = ddW->data<T>();
      for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              PADDLE_ENFORCE_GPU_SUCCESS(
                  platform::dynload::miopenConvolutionForward(
                      handle, &alpha, args4.idesc.desc(),
                      transformed_dy_channel + i * group_offset_out,
                      args4.wdesc.desc(), ddw + i * group_offset_filter,
                      args4.cdesc.desc(), data_algo, &beta, args4.odesc.desc(),
                      transformed_dx + i * group_offset_in, workspace_ptr,
                      workspace_size));
            },
            workspace_size);
#else   // PADDLE_WITH_HIP
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              PADDLE_ENFORCE_GPU_SUCCESS(
                  platform::dynload::cudnnConvolutionForward(
                      handle, &alpha, args4.idesc.desc(),
                      transformed_dy_channel + i * group_offset_out,
                      args4.wdesc.desc(), ddw + i * group_offset_filter,
                      args4.cdesc.desc(), data_algo, workspace_ptr,
                      workspace_size, &beta, args4.odesc.desc(),
                      transformed_dx + i * group_offset_in));
            },
            workspace_size);
#endif  // PADDLE_WITH_HIP
      }
      if (channel_last) {
        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_dX_channel, dX);
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;

#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeOpKernel<float>);
REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeGradOpKernel<float>);
REGISTER_OP_KERNEL(
    conv2d_transpose_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<plat::float16>);

REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeOpKernel<float>);
REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeGradOpKernel<float>);
#else
REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeOpKernel<float>,
                   ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeGradOpKernel<float>,
                   ops::CUDNNConvTransposeGradOpKernel<double>);
REGISTER_OP_KERNEL(
    conv2d_transpose_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<double>,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<plat::float16>);

REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeOpKernel<float>,
                   ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeGradOpKernel<float>,
                   ops::CUDNNConvTransposeGradOpKernel<double>);
#endif
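For comparison with the REGISTER_OP_KERNEL block the deleted file ends with: per the file list above, this cuDNN logic reappears under paddle/phi/kernels/gpudnn/. A hedged sketch of the phi-style registration those new files would use — PD_REGISTER_KERNEL is the phi macro of this period, but the argument list below is reconstructed from the migration pattern, not quoted from the commit:

// Sketch: phi registers plain kernel functions per backend, layout, and dtype,
// instead of per-op OpKernel classes.
PD_REGISTER_KERNEL(conv2d_transpose,  // kernel name
                   GPUDNN,            // backend
                   ALL_LAYOUT,
                   phi::Conv2dTransposeKernel,
                   float,
                   double,
                   phi::dtype::float16) {}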
paddle/fluid/operators/conv_transpose_op.cc
@@ -13,13 +13,17 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/conv_transpose_op.h"
 #include <memory>
 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/data_layout.h"
+#include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/backward.h"
+#include "paddle/phi/infermeta/binary.h"
 #ifdef PADDLE_WITH_MKLDNN
 #include "paddle/fluid/platform/mkldnn_helper.h"
 #endif
...
...
@@ -29,165 +33,6 @@ namespace operators {
using
DataLayout
=
framework
::
DataLayout
;
void
ConvTransposeOp
::
InferShape
(
framework
::
InferShapeContext
*
ctx
)
const
{
OP_INOUT_CHECK
(
ctx
->
HasInput
(
"Input"
),
"Input"
,
"Input"
,
"ConvTranspose"
);
OP_INOUT_CHECK
(
ctx
->
HasInput
(
"Filter"
),
"Input"
,
"Filter"
,
"ConvTranspose"
);
OP_INOUT_CHECK
(
ctx
->
HasOutput
(
"Output"
),
"Output"
,
"Output"
,
"ConvTranspose"
);
auto
in_dims
=
ctx
->
GetInputDim
(
"Input"
);
auto
filter_dims
=
ctx
->
GetInputDim
(
"Filter"
);
std
::
vector
<
int
>
output_size
=
ctx
->
Attrs
().
Get
<
std
::
vector
<
int
>>
(
"output_size"
);
std
::
vector
<
int
>
output_padding
=
ctx
->
Attrs
().
Get
<
std
::
vector
<
int
>>
(
"output_padding"
);
std
::
vector
<
int
>
strides
=
ctx
->
Attrs
().
Get
<
std
::
vector
<
int
>>
(
"strides"
);
std
::
vector
<
int
>
paddings
=
ctx
->
Attrs
().
Get
<
std
::
vector
<
int
>>
(
"paddings"
);
std
::
vector
<
int
>
dilations
=
ctx
->
Attrs
().
Get
<
std
::
vector
<
int
>>
(
"dilations"
);
int
groups
=
ctx
->
Attrs
().
Get
<
int
>
(
"groups"
);
std
::
string
padding_algorithm
=
ctx
->
Attrs
().
Get
<
std
::
string
>
(
"padding_algorithm"
);
const
std
::
string
data_layout_str
=
ctx
->
Attrs
().
Get
<
std
::
string
>
(
"data_format"
);
const
DataLayout
data_layout
=
ctx
->
IsRunMKLDNNKernel
()
?
DataLayout
::
kNCHW
:
framework
::
StringToDataLayout
(
data_layout_str
);
PADDLE_ENFORCE_EQ
(
in_dims
.
size
()
==
4
||
in_dims
.
size
()
==
5
,
true
,
platform
::
errors
::
InvalidArgument
(
"Input of Op(conv_transpose) should be 4-D or "
"5-D Tensor. But received: %u-D Tensor, "
"the shape of input is [%s]"
,
in_dims
.
size
(),
in_dims
));
PADDLE_ENFORCE_EQ
(
in_dims
.
size
(),
filter_dims
.
size
(),
platform
::
errors
::
InvalidArgument
(
"The input's dimension size and filter's dimension size of "
"Op (conv_transpose) should be equal. But received: the shape of "
"input is [%s], the dimension size of input is [%d], the shape "
"of filter is [%s], the dimension size of filter is [%d]. "
,
in_dims
,
in_dims
.
size
(),
filter_dims
,
filter_dims
.
size
()));
int
stride_size
=
strides
.
size
();
for
(
int
i
=
0
;
i
<
stride_size
;
++
i
)
{
PADDLE_ENFORCE_GT
(
strides
[
i
],
0
,
platform
::
errors
::
InvalidArgument
(
"The stride of Op(Conv) should be larget than 0, but received "
"stride is %d."
,
strides
[
i
]));
}
int
in_sub_stride_size
=
in_dims
.
size
()
-
stride_size
;
PADDLE_ENFORCE_EQ
(
in_dims
.
size
()
-
strides
.
size
(),
2U
,
platform
::
errors
::
InvalidArgument
(
"The input's dimension size minus Attr(stride)'s size must "
"be euqal to 2 for Op(conv_transpose). But received: [%d], the "
"input's dimension size is [%d], the shape of input "
"is [%s], the Attr(stride)'s size is [%d]."
,
in_sub_stride_size
,
in_dims
.
size
(),
in_dims
,
strides
.
size
()));
if
(
output_size
.
size
())
PADDLE_ENFORCE_EQ
(
output_size
.
size
(),
strides
.
size
(),
platform
::
errors
::
InvalidArgument
(
"The Attr(output_size) and Attr(stride) of Op(conv_transpose) "
"should be the same."
));
if
(
output_padding
.
size
())
PADDLE_ENFORCE_EQ
(
output_padding
.
size
(),
strides
.
size
(),
platform
::
errors
::
InvalidArgument
(
"The Attr(output_padding) and Attr(stride) of Op(conv_transpose) "
"should be the same."
));
const
int64_t
C
=
(
data_layout
!=
DataLayout
::
kNHWC
?
in_dims
[
1
]
:
in_dims
[
in_dims
.
size
()
-
1
]);
PADDLE_ENFORCE_EQ
(
C
,
filter_dims
[
0
],
platform
::
errors
::
InvalidArgument
(
"The number of input channels should be equal to filter channels "
"for Op(conv_transpose). But received: the input's channels is "
"[%d], the shape of input is [%s], the filter's channels is [%d], "
"the shape of filter is [%s]. The data_format is %s."
"The error may come from wrong data_format setting."
,
C
,
in_dims
,
filter_dims
[
0
],
filter_dims
,
data_layout_str
));
framework
::
DDim
in_data_dims
;
if
(
data_layout
!=
DataLayout
::
kNHWC
)
{
in_data_dims
=
phi
::
slice_ddim
(
in_dims
,
2
,
in_dims
.
size
());
}
else
{
in_data_dims
=
phi
::
slice_ddim
(
in_dims
,
1
,
in_dims
.
size
()
-
1
);
}
framework
::
DDim
filter_data_dims
=
phi
::
slice_ddim
(
filter_dims
,
2
,
filter_dims
.
size
());
std
::
vector
<
int
>
ksize
=
phi
::
vectorize
<
int
>
(
filter_data_dims
);
UpdatePaddingAndDilation
(
&
paddings
,
&
dilations
,
padding_algorithm
,
in_data_dims
,
strides
,
ksize
);
std
::
vector
<
int64_t
>
output_shape
({
in_dims
[
0
]});
if
(
data_layout
!=
DataLayout
::
kNHWC
)
{
output_shape
.
push_back
(
filter_dims
[
1
]
*
groups
);
}
const
int
offset
=
(
data_layout
!=
DataLayout
::
kNHWC
?
2
:
1
);
for
(
size_t
i
=
0
;
i
<
strides
.
size
();
++
i
)
{
auto
filter_extent
=
dilations
[
i
]
*
(
filter_dims
[
i
+
2
]
-
1
)
+
1
;
auto
infer_shape
=
(
ctx
->
IsRuntime
()
||
in_dims
[
i
+
offset
]
>
0
)
?
(
in_dims
[
i
+
offset
]
-
1
)
*
strides
[
i
]
-
paddings
[
2
*
i
]
-
paddings
[
2
*
i
+
1
]
+
filter_extent
:
-
1
;
if
(
output_size
.
size
())
{
if
(
ctx
->
IsRuntime
())
{
PADDLE_ENFORCE_GE
(
output_size
[
i
],
infer_shape
,
platform
::
errors
::
InvalidArgument
(
"output_size of Op(ConvTransposeOp) should not be "
"less than the infered output size. But received output_size = "
"[%s], whose dim %d is less than the infered output size [%s]"
,
phi
::
make_ddim
(
output_size
).
to_str
(),
i
,
infer_shape
));
PADDLE_ENFORCE_LT
(
output_size
[
i
],
infer_shape
+
strides
[
i
],
platform
::
errors
::
InvalidArgument
(
"output_size of Op(ConvTransposeOp) should be less "
"than infered size + stride. But received output_size = [%s], "
"whose dim %d is not less than the infered output size (%d) + "
"stride (%d) = %d"
,
phi
::
make_ddim
(
output_size
).
to_str
(),
i
,
infer_shape
,
strides
[
i
],
infer_shape
+
strides
[
i
]));
}
output_shape
.
push_back
(
output_size
[
i
]);
}
else
if
(
output_padding
.
size
())
{
if
(
ctx
->
IsRuntime
())
{
PADDLE_ENFORCE_GE
(
output_padding
[
i
],
0
,
platform
::
errors
::
InvalidArgument
(
"output_padding of Op(ConvTransposeOp) should not be "
"less than the 0. But received output_padding = "
"[%s], whose dim %d is less than 0"
,
phi
::
make_ddim
(
output_padding
).
to_str
(),
i
));
PADDLE_ENFORCE_LT
(
output_padding
[
i
],
std
::
max
(
strides
[
i
],
dilations
[
i
]),
platform
::
errors
::
InvalidArgument
(
"output_padding of Op(ConvTransposeOp) should be less "
"than either stride or dilation. But received output_size = "
"[%s], "
"whose dim %d is not less than either stride (%d) or "
"dilation (%d)"
,
phi
::
make_ddim
(
output_size
).
to_str
(),
i
,
strides
[
i
],
dilations
[
i
]));
}
output_shape
.
push_back
((
infer_shape
+
output_padding
[
i
]));
}
else
{
output_shape
.
push_back
(
infer_shape
);
}
}
if
(
data_layout
==
DataLayout
::
kNHWC
)
{
output_shape
.
push_back
(
filter_dims
[
1
]
*
groups
);
}
ctx
->
SetOutputDim
(
"Output"
,
phi
::
make_ddim
(
output_shape
));
}
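The loop above is the whole shape contract between conv and conv_transpose. A standalone sketch of that arithmetic (illustrative helper names, not Paddle API) shows why any explicit output_size in [infer_shape, infer_shape + stride) is accepted:

  #include <cassert>

  // Spatial extent produced by conv_transpose along one axis.
  static int TransposeExtent(int in, int stride, int pad0, int pad1, int k,
                             int dilation) {
    int filter_extent = dilation * (k - 1) + 1;
    return (in - 1) * stride - pad0 - pad1 + filter_extent;
  }

  // Extent produced by an ordinary convolution along the same axis.
  static int ConvExtent(int in, int stride, int pad0, int pad1, int k,
                        int dilation) {
    int filter_extent = dilation * (k - 1) + 1;
    return (in + pad0 + pad1 - filter_extent) / stride + 1;
  }

  int main() {
    // conv_transpose inverts conv's shape map; the flooring in ConvExtent is
    // why a whole window of output sizes maps back to the same input size.
    int in = 7, stride = 2, pad0 = 1, pad1 = 1, k = 3, dilation = 1;
    int out = TransposeExtent(in, stride, pad0, pad1, k, dilation);  // 13
    assert(ConvExtent(out, stride, pad0, pad1, k, dilation) == in);
    return 0;
  }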
framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
...
...
@@ -217,7 +62,7 @@ framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
}

framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
-    const std::string& var_name, const Tensor& tensor,
+    const std::string& var_name, const framework::Tensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only input requires reshaping, weights and
...
...
@@ -493,17 +338,6 @@ Example:
)DOC"
);
}
void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("Filter"))) {
    ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims);
  }
}

framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn =
...
...
@@ -587,24 +421,6 @@ class ConvTransposeDoubleGradMaker : public framework::SingleGradOpMaker<T> {
  }
};

void ConvTransposeOpDoubleGrad::InferShape(
    framework::InferShapeContext* ctx) const {
  auto x_dims = ctx->GetInputDim("Input");
  auto w_dims = ctx->GetInputDim("Filter");
  auto do_dims = ctx->GetInputDim("DOutput");

  if (ctx->HasOutput("DDOutput") &&
      (ctx->HasInput("DDInput") || ctx->HasInput("DDFilter"))) {
    ctx->SetOutputDim("DDOutput", do_dims);
  }
  if (ctx->HasOutput("DFilter") && ctx->HasInput("DDInput")) {
    ctx->SetOutputDim("DFilter", w_dims);
  }
  if (ctx->HasOutput("DInput") && ctx->HasInput("DDFilter")) {
    ctx->SetOutputDim("DInput", x_dims);
  }
}

framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn =
...
...
@@ -635,59 +451,57 @@ framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
namespace ops = paddle::operators;

// conv2d_transpose
+DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose,
+                            Conv2dTranposeInferShapeFunctor,
+                            PD_INFER_META(phi::ConvTransposeInferMeta));
+DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad,
+                            Conv2dTranposeGradInferShapeFunctor,
+                            PD_INFER_META(phi::ConvTransposeGradInferMeta));
+DECLARE_INFER_SHAPE_FUNCTOR(
+    conv2d_transpose_grad_grad, Conv2dTranposeDoubleGradInferShapeFunctor,
+    PD_INFER_META(phi::Conv2dTransposeDoubleGradInferMeta));
+
-REGISTER_OPERATOR(conv2d_transpose, ops::ConvTransposeOp,
-                  ops::Conv2DTransposeOpMaker,
-                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
-                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(conv2d_transpose_grad, ops::ConvTransposeOpGrad,
-                  ops::ConvTransposeDoubleGradMaker<paddle::framework::OpDesc>,
-                  ops::ConvTransposeDoubleGradMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(conv2d_transpose_grad_grad, ops::ConvTransposeOpDoubleGrad);
-
-REGISTER_OP_CPU_KERNEL(
-    conv2d_transpose,
-    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
-REGISTER_OP_CPU_KERNEL(
-    conv2d_transpose_grad,
-    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
-                                     float>,
-    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
-                                     double>);
+REGISTER_OPERATOR(conv2d_transpose, ops::ConvTransposeOp,
+                  ops::Conv2DTransposeOpMaker,
+                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
+                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
+                  Conv2dTranposeInferShapeFunctor);
+REGISTER_OPERATOR(conv2d_transpose_grad, ops::ConvTransposeOpGrad,
+                  ops::ConvTransposeDoubleGradMaker<paddle::framework::OpDesc>,
+                  ops::ConvTransposeDoubleGradMaker<paddle::imperative::OpBase>,
+                  Conv2dTranposeGradInferShapeFunctor);
+REGISTER_OPERATOR(conv2d_transpose_grad_grad, ops::ConvTransposeOpDoubleGrad,
+                  Conv2dTranposeDoubleGradInferShapeFunctor);
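Each DECLARE_INFER_SHAPE_FUNCTOR above binds the operator to a phi InferMeta function, replacing the virtual InferShape method that the removed CPU kernels relied on. The PR pairs this with an argument-mapping function in paddle/phi/ops/compat/conv_transpose_sig.cc; an abbreviated sketch of that mapping (illustrative, not the verbatim file) looks like:

  #include "paddle/phi/core/compat/op_utils.h"

  namespace phi {
  // Maps the fluid op's named inputs/attrs/outputs onto the phi kernel's
  // positional signature.
  KernelSignature Conv2dTransposeOpArgumentMapping(
      const ArgumentMappingContext& ctx) {
    return KernelSignature("conv2d_transpose", {"Input", "Filter"},
                           {"strides", "paddings", "output_padding",
                            "output_size", "padding_algorithm", "groups",
                            "dilations", "data_format"},
                           {"Output"});
  }
  }  // namespace phi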
// conv3d_transpose
+DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose,
+                            Conv3dTranposeInferShapeFunctor,
+                            PD_INFER_META(phi::ConvTransposeInferMeta));
+DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose_grad,
+                            Conv3dTranposeGradInferShapeFunctor,
+                            PD_INFER_META(phi::ConvTransposeGradInferMeta));
+
-REGISTER_OPERATOR(conv3d_transpose, ops::ConvTransposeOp,
-                  ops::Conv3DTransposeOpMaker,
-                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
-                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad);
-
-REGISTER_OP_CPU_KERNEL(
-    conv3d_transpose,
-    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
-REGISTER_OP_CPU_KERNEL(
-    conv3d_transpose_grad,
-    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
-                                     float>,
-    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
-                                     double>);
+REGISTER_OPERATOR(conv3d_transpose, ops::ConvTransposeOp,
+                  ops::Conv3DTransposeOpMaker,
+                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
+                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
+                  Conv3dTranposeInferShapeFunctor);
+REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad,
+                  Conv3dTranposeGradInferShapeFunctor);
// depthwise conv2d_transpose
+DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose,
+                            DepthWiseConv2dTranposeInferShapeFunctor,
+                            PD_INFER_META(phi::ConvTransposeInferMeta));
+DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad,
+                            DepthWiseConv2dTranposeGradInferShapeFunctor,
+                            PD_INFER_META(phi::ConvTransposeGradInferMeta));
+
-REGISTER_OPERATOR(depthwise_conv2d_transpose, ops::ConvTransposeOp,
-                  ops::Conv2DTransposeOpMaker,
-                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
-                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(depthwise_conv2d_transpose_grad, ops::ConvTransposeOpGrad);
-
-REGISTER_OP_CPU_KERNEL(
-    depthwise_conv2d_transpose,
-    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
-REGISTER_OP_CPU_KERNEL(
-    depthwise_conv2d_transpose_grad,
-    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
-                                     float>,
-    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
-                                     double>);
+REGISTER_OPERATOR(depthwise_conv2d_transpose, ops::ConvTransposeOp,
+                  ops::Conv2DTransposeOpMaker,
+                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
+                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
+                  DepthWiseConv2dTranposeInferShapeFunctor);
+REGISTER_OPERATOR(depthwise_conv2d_transpose_grad, ops::ConvTransposeOpGrad,
+                  DepthWiseConv2dTranposeGradInferShapeFunctor);
REGISTER_OP_VERSION(conv_transpose)
    .AddCheckpoint(
...
...
paddle/fluid/operators/conv_transpose_op.cu
Deleted 100644 → 0 · View file @ b77e20ac
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/phi/kernels/gpu/depthwise_conv.h"
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using DDim = framework::DDim;
template <typename DeviceContext, typename T>
class DepthwiseConvTransposeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const std::string data_layout_str =
        context.Attr<std::string>("data_format");
    const framework::DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);
    const Tensor* input = context.Input<Tensor>("Input");
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");
    output->mutable_data<T>(context.GetPlace());

    int groups = context.Attr<int>("groups");
    PADDLE_ENFORCE_EQ(
        groups, filter.dims()[0],
        platform::errors::InvalidArgument(
            "groups should be equal to the 1st dimension of filter. But "
            "received groups is %d and filter dimension[0] is %d",
            groups, filter.dims()[0]));

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");
    std::string padding_algorithm =
        context.Attr<std::string>("padding_algorithm");
    for (auto v : dilations) {
      PADDLE_ENFORCE_EQ(v, 1, platform::errors::InvalidArgument(
                                  "dilations should be 1 in depthwise conv. "
                                  "But received dilations is %d",
                                  v));
    }

    auto in_dims = input->dims();
    auto filter_dims = filter.dims();

    framework::DDim in_data_dims;
    if (data_layout != framework::DataLayout::kNHWC) {
      in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    } else {
      in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1);
    }
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    output->mutable_data<T>(context.GetPlace());
    auto& dev_ctx = context.template device_context<DeviceContext>();
    phi::funcs::SetConstant<DeviceContext, T> set_zero;
    set_zero(dev_ctx, output, static_cast<T>(0));

    math::DepthwiseConvInputGradFunctor<phi::GPUContext, T>
        depthwiseConvInputGrad;
    depthwiseConvInputGrad(
        static_cast<const typename framework::ConvertToPhiContext<
            DeviceContext>::TYPE&>(dev_ctx),
        *output, filter, *input, strides,
        std::vector<int>{paddings[0], paddings[2], paddings[1], paddings[3]},
        dilations, output, data_layout);
  }
};
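Note how the forward pass is computed: the kernel delegates to DepthwiseConvInputGradFunctor, because a conv_transpose forward is exactly the input-gradient of the corresponding ordinary convolution. Reading the call that way (a sketch of the argument roles as inferred from the call site, not documented API semantics):

  // Roles in depthwiseConvInputGrad(...) above, viewed as the underlying
  // forward convolution:
  //   conv "input"            <- *output  (the conv_transpose result)
  //   conv "filter"           <- filter
  //   conv "output gradient"  <- *input   (the conv_transpose input)
  //   computed input gradient -> output   (written in place)
  // The paddings vector {paddings[0], paddings[2], paddings[1], paddings[3]}
  // appears to reshuffle {h_begin, h_end, w_begin, w_end} into
  // {h_begin, w_begin, h_end, w_end} for the functor.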
template <typename DeviceContext, typename T>
class DepthwiseConvTransposeGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const std::string data_layout_str =
        context.Attr<std::string>("data_format");
    const framework::DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));
    Tensor filter = *context.Input<Tensor>("Filter");

    if (!input_grad && !filter_grad) return;

    auto& dev_ctx = context.template device_context<DeviceContext>();
    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");
    std::string padding_algorithm =
        context.Attr<std::string>("padding_algorithm");

    auto in_dims = input->dims();
    auto filter_dims = filter.dims();

    framework::DDim in_data_dims;
    if (data_layout != framework::DataLayout::kNHWC) {
      in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    } else {
      in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1);
    }
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    if (input_grad) {
      math::DepthwiseConvFunctor<phi::GPUContext, T> depthwiseConv;
      depthwiseConv(
          static_cast<const typename framework::ConvertToPhiContext<
              DeviceContext>::TYPE&>(dev_ctx),
          *output_grad, filter, strides,
          std::vector<int>{paddings[0], paddings[2], paddings[1],
                           paddings[3]},
          dilations, input_grad, data_layout);
    }

    if (filter_grad) {
      phi::funcs::SetConstant<DeviceContext, T> set_zero;
      filter_grad->mutable_data<T>(context.GetPlace());
      set_zero(dev_ctx, filter_grad, static_cast<T>(0));

      math::DepthwiseConvFilterGradFunctor<phi::GPUContext, T>
          depthwiseConvFilterGrad;
      depthwiseConvFilterGrad(
          static_cast<const typename framework::ConvertToPhiContext<
              DeviceContext>::TYPE&>(dev_ctx),
          *output_grad, *input, strides,
          std::vector<int>{paddings[0], paddings[2], paddings[1],
                           paddings[3]},
          dilations, filter_grad, data_layout);
    }
  }
};
}  // namespace operators
}  // namespace paddle
// conv2d
REGISTER_OP_CUDA_KERNEL(conv2d_transpose,
                        ops::GemmConvTransposeKernel<CUDA, float>,
                        ops::GemmConvTransposeKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(conv2d_transpose_grad,
                        ops::GemmConvTransposeGradKernel<CUDA, float>,
                        ops::GemmConvTransposeGradKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(conv2d_transpose_grad_grad,
                        ops::GemmConvTransposeGradKernel<CUDA, float>,
                        ops::GemmConvTransposeGradKernel<CUDA, double>);

// conv3d
REGISTER_OP_CUDA_KERNEL(conv3d_transpose,
                        ops::GemmConvTransposeKernel<CUDA, float>,
                        ops::GemmConvTransposeKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(conv3d_transpose_grad,
                        ops::GemmConvTransposeGradKernel<CUDA, float>,
                        ops::GemmConvTransposeGradKernel<CUDA, double>);

// depthwise conv2d
REGISTER_OP_CUDA_KERNEL(depthwise_conv2d_transpose,
                        ops::DepthwiseConvTransposeKernel<CUDA, float>,
                        ops::DepthwiseConvTransposeKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(depthwise_conv2d_transpose_grad,
                        ops::DepthwiseConvTransposeGradKernel<CUDA, float>,
                        ops::DepthwiseConvTransposeGradKernel<CUDA, double>);
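With this file deleted, the GPU kernels move to phi (paddle/phi/kernels/gpu/conv_transpose_kernel.cu and its grad counterpart in this PR). A minimal sketch of the phi-style registration that replaces REGISTER_OP_CUDA_KERNEL (abbreviated; see the added files for the authoritative type lists):

  PD_REGISTER_KERNEL(conv2d_transpose,            // kernel name
                     GPU,                         // backend
                     ALL_LAYOUT,                  // data layout
                     phi::Conv2dTransposeKernel,  // kernel function template
                     float,
                     double) {}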
paddle/fluid/operators/conv_transpose_op.h
View file @ 1eb96eec
...
...
@@ -13,72 +13,14 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/operator.h"
namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using DDim = framework::DDim;
template <typename DeviceContext, typename T, size_t D>
static void Slice(const framework::ExecutionContext& context,
                  const Tensor* input, Tensor* out,
                  const std::vector<int64_t>& begin_vec,
                  const std::vector<int64_t>& end_vec,
                  const std::vector<int64_t>& axes_vec) {
  auto& place =
      *context.template device_context<DeviceContext>().eigen_device();
  auto in_dims = input->dims();
  auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
  auto extents = Eigen::DSizes<Eigen::DenseIndex, D>();
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = in_dims[i];
  }

  std::vector<int64_t> out_shape_vec = phi::vectorize(in_dims);
  for (size_t i = 0; i < axes_vec.size(); ++i) {
    offsets[axes_vec[i]] = begin_vec[i];
    extents[axes_vec[i]] = end_vec[i] - begin_vec[i];
    out_shape_vec[axes_vec[i]] = end_vec[i] - begin_vec[i];
  }

  framework::DDim out_dims(phi::make_ddim(out_shape_vec));
  out->mutable_data<T>(out_dims, context.GetPlace());

  auto in_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *input);
  auto out_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out, out_dims);

  EigenSlice<std::decay_t<decltype(place)>, T, D>::Eval(place, out_t, in_t,
                                                        offsets, extents);
  out->Resize(out_dims);
}

template <typename DeviceContext, typename T, size_t D>
static void Slice(const framework::ExecutionContext& context,
                  const Tensor* input, Tensor* out, int64_t begin_idx,
                  int64_t end_idx, int64_t axes) {
  std::vector<int64_t> begin_vec = {begin_idx};
  std::vector<int64_t> end_vec = {end_idx};
  std::vector<int64_t> axes_vec = {axes};
  Slice<DeviceContext, T, D>(context, input, out, begin_vec, end_vec,
                             axes_vec);
}
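These helpers exist because the NHWC/grouped paths in the kernels below must carve channel sub-blocks out of a batch item. Their semantics are just Eigen's tensor slice; a self-contained equivalent in plain Eigen (no Paddle types, sizes are illustrative) that takes channels [4, 8) of the last axis:

  #include <unsupported/Eigen/CXX11/Tensor>

  int main() {
    // out = in.slice(offsets, extents): keep dims 0 and 1, take [4, 8) on
    // dim 2, mirroring Slice(context, &in, &out, 4, 8, /*axes=*/2).
    Eigen::Tensor<float, 3> in(2, 5, 8);
    in.setRandom();
    Eigen::DSizes<Eigen::DenseIndex, 3> offsets(0, 0, 4);
    Eigen::DSizes<Eigen::DenseIndex, 3> extents(2, 5, 4);
    Eigen::Tensor<float, 3> out = in.slice(offsets, extents);
    return static_cast<int>(out.dimension(2)) - 4;  // 0 on success
  }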
// Define Op classes in .h file so that other conv transpose
// operator implementations can reuse the code.
class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
...
...
@@ -94,21 +36,19 @@ class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
class ConvTransposeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  framework::OpKernelType GetKernelTypeForVar(
-      const std::string& var_name, const Tensor& tensor,
+      const std::string& var_name, const framework::Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};
class ConvTransposeOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
...
...
@@ -118,464 +58,11 @@ class ConvTransposeOpGrad : public framework::OperatorWithKernel {
class ConvTransposeOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};
template <typename DeviceContext, typename T>
class GemmConvTransposeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const std::string data_layout_str =
        context.Attr<std::string>("data_format");
    const framework::DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);
    const Tensor* input = context.Input<Tensor>("Input");
    // The filter will be reshaped, so it should not be constant pointer
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* output = context.Output<Tensor>("Output");

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");
    int groups = context.Attr<int>("groups");
    std::string padding_algorithm =
        context.Attr<std::string>("padding_algorithm");

    auto in_dims = input->dims();
    auto filter_dims = filter.dims();
    auto out_dims = output->dims();
    const int batch_size = static_cast<int>(input->dims()[0]);

    framework::DDim in_data_dims;
    if (data_layout != framework::DataLayout::kNHWC) {
      in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    } else {
      in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1);
    }
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    // input_shape_vec: {n, c, h, w} or {n, c, d, h, w} for channel_first
    // input_shape_vec: {n, h, w, c} or {n, d, h, w, c} for channel_last
    std::vector<int64_t> input_shape_vec = phi::vectorize(input->dims());
    // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = phi::vectorize(filter.dims());

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {o_c/g, k_h, k_w, h, w} or {o_c/g, k_d, k_h, k_w, d, h,
    // w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    if (data_layout != framework::DataLayout::kNHWC) {
      col_shape_vec[0] = out_dims[1] / groups;
      for (size_t j = 0; j < data_dim; ++j) {
        col_shape_vec[j + 1] = filter_shape_vec[j + 2];
        col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2];
      }
    } else {
      col_shape_vec[0] = out_dims[out_dims.size() - 1] / groups;
      for (size_t j = 0; j < data_dim; ++j) {
        col_shape_vec[j + 1] = filter_shape_vec[j + 2];
        col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 1];
      }
    }
    DDim col_shape(phi::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (o_c/g * k_h * k_w, h * w) or (o_c/g * k_d * k_h * k_w, d * h *
    // w)
    DDim col_matrix_shape = phi::flatten_to_2d(col_shape, data_dim + 1);

    Tensor col;
    col.mutable_data<T>(col_shape, context.GetPlace());
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    Tensor col_matrix;
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    // output size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first
    // output size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last
    DDim output_shape =
        phi::slice_ddim(output->dims(), 1, output->dims().size());

    // input matrix size: (i_c, h * w) or (i_c, d * h * w) for channel_first
    // input matrix size: (h * w, i_c) or (d * h * w, i_c) for channel_last
    DDim input_matrix_shape;
    if (data_layout != framework::DataLayout::kNHWC) {
      input_matrix_shape = {in_dims[1], col_matrix_shape[1]};
    } else {
      input_matrix_shape = {col_matrix_shape[1], in_dims[in_dims.size() - 1]};
    }

    // filter size: (i_c, o_c/g * k_h * k_w) or (i_c, o_c/g * k_d * k_h * k_w)
    DDim filter_matrix_shape;
    if (data_layout != framework::DataLayout::kNHWC) {
      filter_matrix_shape = {in_dims[1], col_matrix_shape[0]};
    } else {
      filter_matrix_shape = {in_dims[in_dims.size() - 1],
                             col_matrix_shape[0]};
    }
    filter.Resize(filter_matrix_shape);

    output->mutable_data<T>(context.GetPlace());
    phi::funcs::SetConstant<DeviceContext, T> set_zero;
    auto& dev_ctx = context.template device_context<DeviceContext>();
    auto blas = phi::funcs::GetBlas<DeviceContext, T>(dev_ctx);
    set_zero(dev_ctx, output, static_cast<T>(0));

    int in_step = (data_layout != framework::DataLayout::kNHWC
                       ? static_cast<int>(in_dims[1]) / groups
                       : static_cast<int>(in_dims[in_dims.size() - 1]) /
                             groups);
    int out_step =
        (data_layout != framework::DataLayout::kNHWC
             ? static_cast<int>(out_dims[1]) / groups
             : static_cast<int>(out_dims[out_dims.size() - 1]) / groups);
    math::Col2ImFunctor<math::ColFormat::kCFO, DeviceContext, T> col2im;
    math::Col2VolFunctor<DeviceContext, T> col2vol;
    math::ConcatFunctor<DeviceContext, T> concat_functor;

    // convolution transpose: gemm + col2im or col2vol (similar to
    // conv-backward on input)
    size_t D = input->dims().size();
    for (int i = 0; i < batch_size; i++) {
      // batch with size (i_c, h * w) or (i_c, d * h * w) for channel_first
      // batch with size (h * w, i_c) or (d * h * w, i_c) for channel_last
      Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);

      // output size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for
      // channel_first
      // output size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last
      Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape);

      std::vector<Tensor> output_batch_vec;
      for (int g = 0; g < groups; g++) {
        int64_t start = g * in_step;
        int64_t end = (g + 1) * in_step;
        int axes = (data_layout != framework::DataLayout::kNHWC ? 0 : 1);
        Tensor filter_slice = filter.Slice(g * in_step, (g + 1) * in_step);
        Tensor in_slice, out_slice;

        // col_matrix = filter_slice * input_slice
        // of shape (o_c/g * k_h * k_w, h * w)
        // or (o_c/g * k_d * k_h * k_w, d * h * w)
        if (data_layout != framework::DataLayout::kNHWC) {
          in_slice = input_batch.Slice(g * in_step, (g + 1) * in_step);
          out_slice = output_batch.Slice(g * out_step, (g + 1) * out_step);
          blas.MatMul(filter_slice, true, in_slice, false,
                      static_cast<T>(1.0), &col_matrix, static_cast<T>(0.0));
        } else {
          Slice<DeviceContext, T, 2>(context, &input_batch, &in_slice, start,
                                     end, axes);
          start = g * out_step;
          end = (g + 1) * out_step;
          axes = D - 2;
          if (D == 4U) {
            Slice<DeviceContext, T, 3>(context, &output_batch, &out_slice,
                                       start, end, axes);
          } else if (D == 5U) {
            Slice<DeviceContext, T, 4>(context, &output_batch, &out_slice,
                                       start, end, axes);
          }
          blas.MatMul(filter_slice, true, in_slice, true, static_cast<T>(1.0),
                      &col_matrix, static_cast<T>(0.0));
        }

        if (data_dim == 2U) {
          // col2im: col_matrix -> dy
          // from (o_c/g * k_h * k_w, h * w) to (o_c/g, o_h, o_w) or (o_h,
          // o_w, o_c/g)
          col2im(dev_ctx, col, dilations, strides,
                 std::vector<int>{paddings[0], paddings[2], paddings[1],
                                  paddings[3]},
                 &out_slice, data_layout);
        } else if (data_dim == 3U) {
          // col2vol: col_matrix -> dy
          // from (o_c/g * k_d * k_h * k_w, d * h * w) to (o_c/g, o_d, o_h,
          // o_w) or (o_d, o_h, o_w, o_c/g)
          col2vol(dev_ctx, col, dilations, strides, paddings, &out_slice,
                  data_layout);
        }
        if (data_layout == framework::DataLayout::kNHWC) {
          output_batch_vec.push_back(out_slice);
        }
      }
      if (data_layout == framework::DataLayout::kNHWC) {
        concat_functor(dev_ctx, output_batch_vec, static_cast<int>(D - 2),
                       &output_batch);
      }
    }
  }
};
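To make the GEMM bookkeeping above concrete, here is a worked set of sizes for one batch item (channel_first, groups = 1, illustrative numbers only):

  i_c = 4, o_c = 6, k_h = k_w = 3, h = w = 8
  filter (after Resize):   (i_c, o_c * k_h * k_w) = (4, 54)
  input_batch:             (i_c, h * w)           = (4, 64)
  col_matrix = filter^T * input_batch:             (54, 64)

col2im then scatters the (o_c * k_h * k_w, h * w) columns into the (o_c, o_h, o_w) output, accumulating values where the strided windows overlap.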
template <typename DeviceContext, typename T>
class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const std::string data_layout_str =
        context.Attr<std::string>("data_format");
    const framework::DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);
    const Tensor* input = context.Input<Tensor>("Input");
    const Tensor* output_grad =
        context.Input<Tensor>(framework::GradVarName("Output"));
    // For filter, we do not use const pointer b/c we will do reshape,
    // but we should avoid modifying its value.
    Tensor filter = *context.Input<Tensor>("Filter");
    Tensor* input_grad =
        context.Output<Tensor>(framework::GradVarName("Input"));
    Tensor* filter_grad =
        context.Output<Tensor>(framework::GradVarName("Filter"));

    if ((!input_grad) && (!filter_grad)) return;

    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");
    int groups = context.Attr<int>("groups");
    std::string padding_algorithm =
        context.Attr<std::string>("padding_algorithm");

    auto in_dims = input->dims();
    auto filter_dims = filter.dims();
    auto out_grad_dims = output_grad->dims();
    const int batch_size = static_cast<int>(input->dims()[0]);

    framework::DDim in_data_dims;
    if (data_layout != framework::DataLayout::kNHWC) {
      in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    } else {
      in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1);
    }
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    // input_shape_vec: {n, c, h, w} or {n, c, d, h, w} for channel_first
    // input_shape_vec: {n, h, w, c} or {n, d, h, w, c} for channel_last
    std::vector<int64_t> input_shape_vec = phi::vectorize(input->dims());
    // filter_shape_vec: {i_c, o_c, k_h, k_w} or {i_c, o_c, k_d, k_h, k_w}
    std::vector<int64_t> filter_shape_vec = phi::vectorize(filter.dims());

    // use col_shape in the im2col and col2im (or vol2col and col2vol)
    // calculation
    // col_shape_vec: {o_c, k_h, k_w, h, w} or {o_c, k_d, k_h, k_w, d, h, w}
    size_t data_dim = filter_shape_vec.size() - 2;
    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
    if (data_layout != framework::DataLayout::kNHWC) {
      col_shape_vec[0] = out_grad_dims[1];
      for (size_t j = 0; j < data_dim; ++j) {
        col_shape_vec[j + 1] = filter_shape_vec[j + 2];
        col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2];
      }
    } else {
      col_shape_vec[0] = out_grad_dims[out_grad_dims.size() - 1];
      for (size_t j = 0; j < data_dim; ++j) {
        col_shape_vec[j + 1] = filter_shape_vec[j + 2];
        col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 1];
      }
    }
    DDim col_shape(phi::make_ddim(col_shape_vec));

    // use col_matrix_shape in the gemm calculation
    // size: (o_c * k_h * k_w, h * w) or (o_c * k_d * k_h * k_w, d * h * w)
    DDim col_matrix_shape = phi::flatten_to_2d(col_shape, data_dim + 1);

    // output size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first
    // output size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last
    DDim output_shape =
        phi::slice_ddim(output_grad->dims(), 1, output_grad->dims().size());

    // input matrix size: (i_c, h * w) or (i_c, d * h * w) for channel_first
    // input matrix size: (h * w, i_c) or (d * h * w, i_c) for channel_last
    DDim input_matrix_shape;
    if (data_layout != framework::DataLayout::kNHWC) {
      input_matrix_shape = {in_dims[1], col_matrix_shape[1]};
    } else {
      input_matrix_shape = {col_matrix_shape[1], in_dims[in_dims.size() - 1]};
    }

    // filter size: (i_c, o_c/g * k_h * k_w) or (i_c, o_c/g * k_d * k_h * k_w)
    DDim filter_matrix_shape;
    if (data_layout != framework::DataLayout::kNHWC) {
      filter_matrix_shape = {in_dims[1], col_matrix_shape[0] / groups};
    } else {
      filter_matrix_shape = {in_dims[in_dims.size() - 1],
                             col_matrix_shape[0] / groups};
    }
    filter.Resize(filter_matrix_shape);

    int in_step = (data_layout != framework::DataLayout::kNHWC
                       ? static_cast<int>(in_dims[1]) / groups
                       : static_cast<int>(in_dims[in_dims.size() - 1]) /
                             groups);
    int col_step = static_cast<int>(col_matrix_shape[0]) / groups;

    // convolution transpose grad on input:
    // im2col + gemm (similar to conv-forward)
    // input need to compute gradient
    auto& dev_ctx = context.template device_context<DeviceContext>();
    auto blas = phi::funcs::GetBlas<DeviceContext, T>(dev_ctx);
    if (input_grad || filter_grad) {
      Tensor col;
      col.mutable_data<T>(col_shape, context.GetPlace());
      // col_matrix shares the same piece of data with col,
      // but will be reshaped into a two-dimensional matrix shape
      // to call the matrix multiplication interface.
      Tensor col_matrix;
      col_matrix.ShareDataWith(col);
      col_matrix.Resize(col_matrix_shape);

      Tensor filter_grad_;
      phi::funcs::SetConstant<DeviceContext, T> set_zero;

      math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col;
      math::Vol2ColFunctor<DeviceContext, T> vol2col;
      math::ConcatFunctor<DeviceContext, T> concat_functor;

      if (input_grad) {
        input_grad->mutable_data<T>(context.GetPlace());
        set_zero(dev_ctx, input_grad, static_cast<T>(0));
      }
      if (filter_grad) {  // filter_grad_ size (i_c, o_c/g, k_h, k_w)
        filter_grad->mutable_data<T>(context.GetPlace());
        set_zero(dev_ctx, filter_grad, static_cast<T>(0));
        filter_grad_ = *filter_grad;
        filter_grad_.Resize(filter_matrix_shape);
      }

      size_t D = input->dims().size();
      for (int i = 0; i < batch_size; i++) {
        // batch with size (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for
        // channel_first
        // batch with size (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for
        // channel_last
        Tensor output_grad_batch =
            output_grad->Slice(i, i + 1).Resize(output_shape);

        if (data_dim == 2U) {
          // im2col: dy -> col matrix
          // from (o_c, o_h, o_w) to (o_c * k_h * k_w, i_h * i_w) for
          // channel_first
          // from (o_h, o_w, o_c) to (o_c * k_h * k_w, i_h * i_w) for
          // channel_last
          im2col(dev_ctx, output_grad_batch, dilations, strides,
                 std::vector<int>{paddings[0], paddings[2], paddings[1],
                                  paddings[3]},
                 &col, data_layout);
        } else if (data_dim == 3U) {
          // vol2col: dy -> col_matrix
          // from (o_c, o_d, o_h, o_w) to (o_c * k_d * k_h * k_w, i_d * i_h *
          // i_w) for channel_first
          // from (o_d, o_h, o_w, o_c) to (i_d * i_h * i_w, o_c * k_d * k_h *
          // k_w) for channel_last
          vol2col(dev_ctx, output_grad_batch, dilations, strides, paddings,
                  &col, data_layout);
        }

        if (input_grad) {
          // batch with size (i_c, i_h, i_w) or (i_h, i_w, i_c)
          Tensor input_grad_batch =
              input_grad->Slice(i, i + 1).Resize(input_matrix_shape);

          // gemm: dx = filter * dy
          // (i_c, o_c * k_h * k_w) * (o_c * k_h * k_w, i_h * i_w) -> (i_c,
          // i_h * i_w)
          // or
          // (i_c, o_c * k_d * k_h * k_w) * (o_c * k_d * k_h * k_w, i_d * i_h
          // * i_w) -> (i_c, i_d, i_h, i_w)
          // gemm: dx = dy^T * filter^T for channel_last
          std::vector<Tensor> input_grad_batch_vec;
          for (int g = 0; g < groups; g++) {
            // input_grad_slice: (i_c/g, i_h * i_w) or (i_c/g, i_d * i_h *
            // i_w) for channel_first
            // input_grad_slice: (i_h * i_w, i_c/g) or (i_d * i_h * i_w,
            // i_c/g) for channel_last
            // filter_slice: (i_c/g, o_c/g * k_h * k_w)
            Tensor filter_slice =
                filter.Slice(g * in_step, (g + 1) * in_step);
            // col_matrix_slice: (o_c/g * k_h * k_w, h * w) or (o_c/g * k_d *
            // k_h * k_w, d * h * w)
            Tensor col_matrix_slice =
                col_matrix.Slice(g * col_step, (g + 1) * col_step);
            if (data_layout != framework::DataLayout::kNHWC) {
              Tensor input_grad_slice =
                  input_grad_batch.Slice(g * in_step, (g + 1) * in_step);
              blas.MatMul(filter_slice, false, col_matrix_slice, false,
                          static_cast<T>(1.0), &input_grad_slice,
                          static_cast<T>(0.0));
            } else {
              Tensor input_grad_slice;
              Slice<DeviceContext, T, 2>(context, &input_grad_batch,
                                         &input_grad_slice, g * in_step,
                                         (g + 1) * in_step, 1);
              blas.MatMul(col_matrix_slice, true, filter_slice, true,
                          static_cast<T>(1.0), &input_grad_slice,
                          static_cast<T>(0.0));
              DDim input_grad_slice_shape;
              if (data_dim == 2U) {
                input_grad_slice_shape = {in_dims[1], in_dims[2], in_step};
              } else {
                input_grad_slice_shape = {in_dims[1], in_dims[2], in_dims[3],
                                          in_step};
              }
              input_grad_slice =
                  input_grad_slice.Resize(input_grad_slice_shape);
              input_grad_batch_vec.push_back(input_grad_slice);
            }
          }
          if (data_layout == framework::DataLayout::kNHWC) {
            concat_functor(dev_ctx, input_grad_batch_vec,
                           static_cast<int>(D - 2), &input_grad_batch);
          }
        }
        if (filter_grad) {
          // input batch: (i_c, i_h * i_w) or (i_h, i_w * i_c)
          Tensor in_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);
          // gemm: d_filter = x * dy^T
          // (i_c, i_h * i_w) * (i_h * i_w, o_c * k_h * k_w) -> (i_c, o_c *
          // k_h * k_w)
          // or
          // (i_c, i_d * i_h * i_w) * (i_d * i_h * i_w, o_c * k_d * k_h *
          // k_w) -> (i_c, o_c * k_d * k_h * k_w)
          // gemm: d_filter = x^T * dy^T for channel_last
          for (int g = 0; g < groups; g++) {
            Tensor filter_grad_slice =
                filter_grad_.Slice(g * in_step, (g + 1) * in_step);
            Tensor col_matrix_slice =
                col_matrix.Slice(g * col_step, (g + 1) * col_step);
            if (data_layout != framework::DataLayout::kNHWC) {
              Tensor in_batch_slice =
                  in_batch.Slice(g * in_step, (g + 1) * in_step);
              blas.MatMul(in_batch_slice, false, col_matrix_slice, true,
                          static_cast<T>(1.0), &filter_grad_slice,
                          static_cast<T>(1.0));
            } else {
              Tensor in_batch_slice;
              Slice<DeviceContext, T, 2>(context, &in_batch, &in_batch_slice,
                                         g * in_step, (g + 1) * in_step, 1);
              blas.MatMul(in_batch_slice, true, col_matrix_slice, true,
                          static_cast<T>(1.0), &filter_grad_slice,
                          static_cast<T>(1.0));
            }
          }
        }
      }
    }
  }
};
}  // namespace operators
}  // namespace paddle
paddle/fluid/operators/conv_transpose_op_npu.cc
View file @ 1eb96eec
...
...
@@ -13,11 +13,15 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using NPUDeviceContext = platform::NPUDeviceContext;

template <typename T>
...
...
@@ -55,8 +59,8 @@ class Conv2DTransposeNPUKernel : public framework::OpKernel<T> {
  filter_data_dims = phi::slice_ddim(filter_dims, 2, in_dims.size());
  std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
-  UpdatePaddingAndDilation(&padding, &dilation, padding_algorithm,
-                           in_data_dims, stride, ksize);
+  phi::UpdatePaddingAndDilation(&padding, &dilation, padding_algorithm,
+                                in_data_dims, stride, ksize);

  // construct NPU attr
  std::vector<int> strides(4, 1);
...
...
@@ -137,8 +141,8 @@ class Conv2DTransposeGradNPUKernel : public framework::OpKernel<T> {
  framework::DDim filter_data_dims =
      phi::slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
-  UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
-                           in_data_dims, strides, ksize);
+  phi::UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                                in_data_dims, strides, ksize);

  std::vector<int> strides_vec(4, 1);
  std::vector<int> dilations_vec(4, 1);
...
...
paddle/fluid/operators/conv_transpose_op_xpu.cc
View file @ 1eb96eec
...
...
@@ -8,15 +8,22 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/conv_transpose_op.h"
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#ifdef PADDLE_WITH_XPU
namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// target_len == 2 || target_len == 4
inline std::vector<int> vector_extend(const std::vector<int>& src,
                                      int target_len) {
...
...
@@ -61,8 +68,8 @@ class Conv2DTransposeXPUKernel : public framework::OpKernel<T> {
  framework::DDim filter_data_dims =
      phi::slice_ddim(filter.dims(), 2, filter.dims().size());
  std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
-  UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
-                           in_data_dims, strides, ksize);
+  phi::UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                                in_data_dims, strides, ksize);
  const int batch_size = static_cast<int>(input->dims()[0]);
  const int img_yc = static_cast<int>(input->dims()[1]);
...
...
@@ -135,8 +142,8 @@ class Conv2DTransposeGradXPUKernel : public framework::OpKernel<T> {
  framework::DDim filter_data_dims =
      phi::slice_ddim(filter.dims(), 2, filter.dims().size());
  std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
-  UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
-                           in_data_dims, strides, ksize);
+  phi::UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                                in_data_dims, strides, ksize);
  const int batch_size = static_cast<int>(input->dims()[0]);
  const int img_yc = static_cast<int>(input->dims()[1]);
...
...
paddle/phi/infermeta/backward.cc
View file @ 1eb96eec
...
...
@@ -64,6 +64,45 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
  }
}
void ConvTransposeGradInferMeta(const MetaTensor& x,
                                const MetaTensor& filter,
                                const MetaTensor& dout,
                                const std::vector<int>& strides,
                                const std::vector<int>& paddings,
                                const std::vector<int>& output_padding,
                                const std::vector<int>& output_size,
                                const std::string& padding_algorithm,
                                int groups,
                                const std::vector<int>& dilations,
                                const std::string& data_format,
                                MetaTensor* dx,
                                MetaTensor* dfilter) {
  GeneralBinaryGradInferMeta(x, filter, dx, dfilter);
}
void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x,
                                        const MetaTensor& filter,
                                        const MetaTensor& dout,
                                        const MetaTensor& ddx,
                                        const MetaTensor& ddfilter,
                                        const std::vector<int>& strides,
                                        const std::vector<int>& paddings,
                                        const std::vector<int>& output_padding,
                                        const std::vector<int>& output_size,
                                        const std::string& padding_algorithm,
                                        int groups,
                                        const std::vector<int>& dilations,
                                        const std::string& data_format,
                                        MetaTensor* dx,
                                        MetaTensor* dfilter,
                                        MetaTensor* ddout) {
  GeneralBinaryGradInferMeta(x, filter, dx, dfilter);

  if (ddout) {
    ddout->share_meta(dout);
  }
}
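Both functions above delegate the dx/dfilter metadata to GeneralBinaryGradInferMeta. A minimal sketch of what that amounts to for this op (an assumption drawn from these call sites, not a copy of that helper's body): each gradient output, when present, simply inherits the meta of its forward input.

  #include "paddle/phi/core/meta_tensor.h"

  // Hypothetical equivalent of the GeneralBinaryGradInferMeta calls above.
  void ConvTransposeGradInferMetaSketch(const phi::MetaTensor& x,
                                        const phi::MetaTensor& filter,
                                        phi::MetaTensor* dx,
                                        phi::MetaTensor* dfilter) {
    if (dx) dx->share_meta(x);
    if (dfilter) dfilter->share_meta(filter);
  }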
void GatherNdGradInferMeta(const MetaTensor& x,
                           const MetaTensor& index,
                           const MetaTensor& out_grad,
...
...
paddle/phi/infermeta/backward.h
View file @ 1eb96eec
...
...
@@ -37,6 +37,37 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
                                        MetaTensor* dweight,
                                        MetaTensor* dbias);
void ConvTransposeGradInferMeta(const MetaTensor& x,
                                const MetaTensor& filter,
                                const MetaTensor& dout,
                                const std::vector<int>& strides,
                                const std::vector<int>& paddings,
                                const std::vector<int>& output_padding,
                                const std::vector<int>& output_size,
                                const std::string& padding_algorithm,
                                int groups,
                                const std::vector<int>& dilations,
                                const std::string& data_format,
                                MetaTensor* dx,
                                MetaTensor* dfilter);
void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x,
                                        const MetaTensor& filter,
                                        const MetaTensor& dout,
                                        const MetaTensor& ddx,
                                        const MetaTensor& ddfilter,
                                        const std::vector<int>& strides,
                                        const std::vector<int>& paddings,
                                        const std::vector<int>& output_padding,
                                        const std::vector<int>& output_size,
                                        const std::string& padding_algorithm,
                                        int groups,
                                        const std::vector<int>& dilations,
                                        const std::string& data_format,
                                        MetaTensor* dx,
                                        MetaTensor* dfilter,
                                        MetaTensor* ddout);
void GatherNdGradInferMeta(const MetaTensor& x,
                           const MetaTensor& index,
                           const MetaTensor& out_grad,
...
...
paddle/phi/infermeta/binary.cc
View file @ 1eb96eec
...
...
@@ -17,8 +17,10 @@ limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
...
...
@@ -312,51 +314,6 @@ void CompareAllInferMeta(const MetaTensor& x,
  out->set_dtype(DataType::BOOL);
}

-void CrossInferMeta(const MetaTensor& x,
-                    const MetaTensor& y,
-                    int axis,
-                    MetaTensor* out) {
-  auto x_dim = x.dims();
-  auto y_dim = y.dims();
-  auto dim = axis;
-  bool dims_match = phi::funcs::CheckDims(x_dim, y_dim);
-  PADDLE_ENFORCE_EQ(
-      dims_match, true,
-      phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal "
-                                   "to the 'shape' of Input(Y). But received "
-                                   "Input(X).dimensions = [%s], "
-                                   "Input(Y).dimensions = [%s]",
-                                   x_dim, y_dim));
-  if (dim != DDim::kMaxRank) {
-    PADDLE_ENFORCE_EQ(
-        dim < x_dim.size() && dim >= (0 - x_dim.size()), true,
-        phi::errors::OutOfRange(
-            "Attr(dim) is out of range, It's expected "
-            "to be in range of [-%d, %d]. But received Attr(dim) = %d.",
-            x_dim.size(), x_dim.size() - 1, dim));
-    if (dim < 0) {
-      dim += x_dim.size();
-    }
-    PADDLE_ENFORCE_EQ(
-        x_dim[dim] == 3 && y_dim[dim] == 3, true,
-        phi::errors::InvalidArgument(
-            "Input(X/Y).dims()[dim] should be equal to 3. "
-            "But received Input(X/Y).dims()[dim] = %d.",
-            x_dim[dim]));
-  }
-  out->set_dims(x_dim);
-  out->set_dtype(x.dtype());
-  out->set_layout(x.layout());
-  out->share_lod(x);
-}
-
void ConvInferMeta(const MetaTensor& input,
                   const MetaTensor& filter,
                   const std::vector<int>& strides,
...
...
@@ -512,6 +469,241 @@ void ConvInferMeta(const MetaTensor& input,
  out->set_dtype(input.dtype());
}
void ConvTransposeInferMeta(const MetaTensor& x,
                            const MetaTensor& filter,
                            const std::vector<int>& strides,
                            const std::vector<int>& paddings,
                            const std::vector<int>& output_padding,
                            const std::vector<int>& output_size,
                            const std::string& padding_algorithm,
                            int groups,
                            const std::vector<int>& dilations,
                            const std::string& data_format,
                            MetaTensor* out,
                            MetaConfig config) {
  auto x_dims = x.dims();
  auto filter_dims = filter.dims();
  std::vector<int> paddings_ = paddings;
  std::vector<int> dilations_ = dilations;
  const DataLayout data_layout =
      config.is_run_mkldnn_kernel
          ? DataLayout::kNCHW
          : paddle::framework::StringToDataLayout(data_format);

  PADDLE_ENFORCE_EQ(x_dims.size() == 4 || x_dims.size() == 5, true,
                    errors::InvalidArgument(
                        "Input of Op(conv_transpose) should be 4-D or "
                        "5-D Tensor. But received: %u-D Tensor, "
                        "the shape of input is [%s]",
                        x_dims.size(), x_dims));
  PADDLE_ENFORCE_EQ(
      x_dims.size(), filter_dims.size(),
      errors::InvalidArgument(
          "The input's dimension size and filter's dimension size of "
          "Op (conv_transpose) should be equal. But received: the shape of "
          "input is [%s], the dimension size of input is [%d], the shape "
          "of filter is [%s], the dimension size of filter is [%d]. ",
          x_dims, x_dims.size(), filter_dims, filter_dims.size()));

  int stride_size = strides.size();
  for (int i = 0; i < stride_size; ++i) {
    PADDLE_ENFORCE_GT(
        strides[i], 0,
        errors::InvalidArgument(
            "The stride of Op(Conv) should be larger than 0, but received "
            "stride is %d.",
            strides[i]));
  }

  int in_sub_stride_size = x_dims.size() - stride_size;

  PADDLE_ENFORCE_EQ(
      x_dims.size() - strides.size(), 2U,
      errors::InvalidArgument(
          "The input's dimension size minus Attr(stride)'s size must "
          "be equal to 2 for Op(conv_transpose). But received: [%d], the "
          "input's dimension size is [%d], the shape of input "
          "is [%s], the Attr(stride)'s size is [%d].",
          in_sub_stride_size, x_dims.size(), x_dims, strides.size()));
  if (output_size.size())
    PADDLE_ENFORCE_EQ(
        output_size.size(), strides.size(),
        errors::InvalidArgument(
            "The Attr(output_size) and Attr(stride) of Op(conv_transpose) "
            "should be the same."));
  if (output_padding.size())
    PADDLE_ENFORCE_EQ(
        output_padding.size(), strides.size(),
        errors::InvalidArgument(
            "The Attr(output_padding) and Attr(stride) of Op(conv_transpose) "
            "should be the same."));

  const int64_t C =
      (data_layout != DataLayout::kNHWC ? x_dims[1]
                                        : x_dims[x_dims.size() - 1]);
  PADDLE_ENFORCE_EQ(
      C, filter_dims[0],
      errors::InvalidArgument(
          "The number of input channels should be equal to filter channels "
          "for Op(conv_transpose). But received: the input's channels is "
          "[%d], the shape of input is [%s], the filter's channels is [%d], "
          "the shape of filter is [%s]. The data_format is %s. "
          "The error may come from wrong data_format setting.",
          C, x_dims, filter_dims[0], filter_dims, data_format));

  DDim x_data_dims;
  if (data_layout != DataLayout::kNHWC) {
    x_data_dims = slice_ddim(x_dims, 2, x_dims.size());
  } else {
    x_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
  }
  DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(&paddings_, &dilations_, padding_algorithm,
                           x_data_dims, strides, ksize);

  std::vector<int64_t> output_shape({x_dims[0]});
  if (data_layout != DataLayout::kNHWC) {
    output_shape.push_back(filter_dims[1] * groups);
  }
  const int offset = (data_layout != DataLayout::kNHWC ? 2 : 1);
  for (size_t i = 0; i < strides.size(); ++i) {
    auto filter_extent = dilations_[i] * (filter_dims[i + 2] - 1) + 1;
    auto infer_shape = (config.is_runtime || x_dims[i + offset] > 0)
                           ? (x_dims[i + offset] - 1) * strides[i] -
                                 paddings_[2 * i] - paddings_[2 * i + 1] +
                                 filter_extent
                           : -1;
    if (output_size.size()) {
      if (config.is_runtime) {
        PADDLE_ENFORCE_GE(
            output_size[i], infer_shape,
            errors::InvalidArgument(
                "output_size of Op(ConvTransposeOp) should not be "
                "less than the inferred output size. But received output_size "
                "= [%s], whose dim %d is less than the inferred output size "
                "[%s]",
                make_ddim(output_size).to_str(), i, infer_shape));
        PADDLE_ENFORCE_LT(
            output_size[i], infer_shape + strides[i],
            errors::InvalidArgument(
                "output_size of Op(ConvTransposeOp) should be less "
                "than inferred size + stride. But received output_size = "
                "[%s], whose dim %d is not less than the inferred output "
                "size (%d) + stride (%d) = %d",
                make_ddim(output_size).to_str(), i, infer_shape, strides[i],
                infer_shape + strides[i]));
      }
      output_shape.push_back(output_size[i]);
    } else if (output_padding.size()) {
      if (config.is_runtime) {
        PADDLE_ENFORCE_GE(
            output_padding[i], 0,
            errors::InvalidArgument(
                "output_padding of Op(ConvTransposeOp) should not be "
                "less than 0. But received output_padding = "
                "[%s], whose dim %d is less than 0",
                make_ddim(output_padding).to_str(), i));
        PADDLE_ENFORCE_LT(
            output_padding[i], std::max(strides[i], dilations_[i]),
            errors::InvalidArgument(
                "output_padding of Op(ConvTransposeOp) should be less "
                "than either stride or dilation. But received output_size = "
                "[%s], whose dim %d is not less than either stride (%d) or "
                "dilation (%d)",
                make_ddim(output_size).to_str(), i, strides[i],
                dilations_[i]));
      }
      output_shape.push_back(infer_shape + output_padding[i]);
    } else {
      output_shape.push_back(infer_shape);
    }
  }
  if (data_layout == DataLayout::kNHWC) {
    output_shape.push_back(filter_dims[1] * groups);
  }
  out->set_dims(make_ddim(output_shape));
  out->set_dtype(x.dtype());
}
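A worked instance of the shape rules enforced above (numbers are illustrative, not from the diff):

  #include <cassert>

  int main() {
    // One axis of an NCHW input: extent 8, stride 2, paddings {1, 1},
    // 3x3 filter, dilation 1.
    int x = 8, stride = 2, p0 = 1, p1 = 1, k = 3, dilation = 1;
    int filter_extent = dilation * (k - 1) + 1;                    // 3
    int infer_shape = (x - 1) * stride - p0 - p1 + filter_extent;  // 15
    assert(infer_shape == 15);
    // An explicit output_size must lie in [infer_shape, infer_shape +
    // stride); output_padding instead adds onto infer_shape and must stay
    // below max(stride, dilation).
    int output_padding = 1;  // valid here: 1 < max(2, 1)
    assert(infer_shape + output_padding == 16);
    return 0;
  }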
void CrossInferMeta(const MetaTensor& x,
                    const MetaTensor& y,
                    int axis,
                    MetaTensor* out) {
  auto x_dim = x.dims();
  auto y_dim = y.dims();
  auto dim = axis;
  bool dims_match = phi::funcs::CheckDims(x_dim, y_dim);
  PADDLE_ENFORCE_EQ(
      dims_match, true,
      phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal "
                                   "to the 'shape' of Input(Y). But received "
                                   "Input(X).dimensions = [%s], "
                                   "Input(Y).dimensions = [%s]",
                                   x_dim, y_dim));
  if (dim != DDim::kMaxRank) {
    PADDLE_ENFORCE_EQ(
        dim < x_dim.size() && dim >= (0 - x_dim.size()), true,
        phi::errors::OutOfRange(
            "Attr(dim) is out of range, It's expected "
            "to be in range of [-%d, %d]. But received Attr(dim) = %d.",
            x_dim.size(), x_dim.size() - 1, dim));
    if (dim < 0) {
      dim += x_dim.size();
    }
    PADDLE_ENFORCE_EQ(
        x_dim[dim] == 3 && y_dim[dim] == 3, true,
        phi::errors::InvalidArgument(
            "Input(X/Y).dims()[dim] should be equal to 3. "
            "But received Input(X/Y).dims()[dim] = %d.",
            x_dim[dim]));
  }
  out->set_dims(x_dim);
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());
  out->share_lod(x);
}
void DistInferMeta(const MetaTensor& x,
                   const MetaTensor& y,
                   float p,
...
...
paddle/phi/infermeta/binary.h
View file @ 1eb96eec
...
...
@@ -83,6 +83,19 @@ void ConvInferMeta(const MetaTensor& input,
                   MetaTensor* out,
                   MetaConfig config = MetaConfig());
void ConvTransposeInferMeta(const MetaTensor& x,
                            const MetaTensor& filter,
                            const std::vector<int>& strides,
                            const std::vector<int>& paddings,
                            const std::vector<int>& output_padding,
                            const std::vector<int>& output_size,
                            const std::string& padding_algorithm,
                            int groups,
                            const std::vector<int>& dilations,
                            const std::string& data_format,
                            MetaTensor* out,
                            MetaConfig config = MetaConfig());
void
CrossInferMeta
(
const
MetaTensor
&
x
,
const
MetaTensor
&
y
,
int
axis
,
...
...
paddle/phi/kernels/conv_transpose_grad_kernel.h  0 → 100644  (View file @ 1eb96eec)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/phi/core/dense_tensor.h"
namespace phi {

template <typename T, typename Context>
void Conv2dTransposeGradKernel(const Context& ctx,
                               const DenseTensor& x,
                               const DenseTensor& filter,
                               const DenseTensor& dout,
                               const std::vector<int>& strides,
                               const std::vector<int>& paddings,
                               const std::vector<int>& output_padding,
                               const std::vector<int>& output_size,
                               const std::string& padding_algorithm,
                               int groups,
                               const std::vector<int>& dilations,
                               const std::string& data_format,
                               DenseTensor* dx,
                               DenseTensor* dfilter);

template <typename T, typename Context>
void Conv2dTransposeDoubleGradKernel(const Context& ctx,
                                     const DenseTensor& x,
                                     const DenseTensor& filter,
                                     const DenseTensor& dout,
                                     const DenseTensor& ddx,
                                     const DenseTensor& ddfilter,
                                     const std::vector<int>& strides,
                                     const std::vector<int>& paddings,
                                     const std::vector<int>& output_padding,
                                     const std::vector<int>& output_size,
                                     const std::string& padding_algorithm,
                                     int groups,
                                     const std::vector<int>& dilations,
                                     const std::string& data_format,
                                     DenseTensor* dx,
                                     DenseTensor* dfilter,
                                     DenseTensor* ddout);

template <typename T, typename Context>
void Conv3dTransposeGradKernel(const Context& ctx,
                               const DenseTensor& x,
                               const DenseTensor& filter,
                               const DenseTensor& dout,
                               const std::vector<int>& strides,
                               const std::vector<int>& paddings,
                               const std::vector<int>& output_padding,
                               const std::vector<int>& output_size,
                               const std::string& padding_algorithm,
                               int groups,
                               const std::vector<int>& dilations,
                               const std::string& data_format,
                               DenseTensor* dx,
                               DenseTensor* dfilter);

template <typename T, typename Context>
void DepthwiseConv2dTransposeGradKernel(const Context& ctx,
                                        const DenseTensor& x,
                                        const DenseTensor& filter,
                                        const DenseTensor& dout,
                                        const std::vector<int>& strides,
                                        const std::vector<int>& paddings,
                                        const std::vector<int>& output_padding,
                                        const std::vector<int>& output_size,
                                        const std::string& padding_algorithm,
                                        int groups,
                                        const std::vector<int>& dilations,
                                        const std::string& data_format,
                                        DenseTensor* dx,
                                        DenseTensor* dfilter);

}  // namespace phi
paddle/phi/kernels/conv_transpose_kernel.h  0 → 100644  (View file @ 1eb96eec)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/phi/core/dense_tensor.h"
namespace phi {

template <typename T, typename Context>
void Conv2dTransposeKernel(const Context& ctx,
                           const DenseTensor& x,
                           const DenseTensor& filter,
                           const std::vector<int>& strides,
                           const std::vector<int>& paddings,
                           const std::vector<int>& output_padding,
                           const std::vector<int>& output_size,
                           const std::string& padding_algorithm,
                           int groups,
                           const std::vector<int>& dilations,
                           const std::string& data_format,
                           DenseTensor* out);

template <typename T, typename Context>
void Conv3dTransposeKernel(const Context& ctx,
                           const DenseTensor& x,
                           const DenseTensor& filter,
                           const std::vector<int>& strides,
                           const std::vector<int>& paddings,
                           const std::vector<int>& output_padding,
                           const std::vector<int>& output_size,
                           const std::string& padding_algorithm,
                           int groups,
                           const std::vector<int>& dilations,
                           const std::string& data_format,
                           DenseTensor* out);

template <typename T, typename Context>
void DepthwiseConv2dTransposeKernel(const Context& ctx,
                                    const DenseTensor& x,
                                    const DenseTensor& filter,
                                    const std::vector<int>& strides,
                                    const std::vector<int>& paddings,
                                    const std::vector<int>& output_padding,
                                    const std::vector<int>& output_size,
                                    const std::string& padding_algorithm,
                                    int groups,
                                    const std::vector<int>& dilations,
                                    const std::string& data_format,
                                    DenseTensor* out);

}  // namespace phi
paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc  0 → 100644  (View file @ 1eb96eec)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/conv_transpose_grad_kernel.h"
#include "paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {

template <typename T, typename Context>
void DepthwiseConv2dTransposeGradKernel(const Context& ctx,
                                        const DenseTensor& x,
                                        const DenseTensor& filter,
                                        const DenseTensor& dout,
                                        const std::vector<int>& strides,
                                        const std::vector<int>& paddings,
                                        const std::vector<int>& output_padding,
                                        const std::vector<int>& output_size,
                                        const std::string& padding_algorithm,
                                        int groups,
                                        const std::vector<int>& dilations,
                                        const std::string& data_format,
                                        DenseTensor* dx,
                                        DenseTensor* dfilter) {
  ConvTransposeGradRawKernel<T, Context>(ctx,
                                         x,
                                         filter,
                                         dout,
                                         strides,
                                         paddings,
                                         padding_algorithm,
                                         groups,
                                         dilations,
                                         data_format,
                                         dx,
                                         dfilter);
}

}  // namespace phi

PD_REGISTER_KERNEL(conv2d_transpose_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeGradKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(conv3d_transpose_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::Conv3dTransposeGradKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(depthwise_conv2d_transpose_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::DepthwiseConv2dTransposeGradKernel,
                   float,
                   double) {}
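As a reading aid for the registration macros above: PD_REGISTER_KERNEL keys a kernel functor by (op name, backend, layout, data type). A hedged sketch of what one line means; the macro's expansion details are phi internals assumed here, not shown in this patch:

    // PD_REGISTER_KERNEL(conv2d_transpose_grad, CPU, ALL_LAYOUT,
    //                    phi::Conv2dTransposeGradKernel, float, double) {}
    // records phi::Conv2dTransposeGradKernel<float, phi::CPUContext> and the
    // double instantiation in phi's kernel factory under the name
    // "conv2d_transpose_grad" for the CPU backend, so the old fluid operator
    // can dispatch to them after the migration.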
paddle/phi/kernels/cpu/conv_transpose_kernel.cc  0 → 100644  (View file @ 1eb96eec)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/conv_transpose_kernel.h"
#include "paddle/phi/kernels/impl/conv_transpose_kernel_impl.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {

template <typename T, typename Context>
void DepthwiseConv2dTransposeKernel(const Context& ctx,
                                    const DenseTensor& x,
                                    const DenseTensor& filter,
                                    const std::vector<int>& strides,
                                    const std::vector<int>& paddings,
                                    const std::vector<int>& output_padding,
                                    const std::vector<int>& output_size,
                                    const std::string& padding_algorithm,
                                    int groups,
                                    const std::vector<int>& dilations,
                                    const std::string& data_format,
                                    DenseTensor* out) {
  ConvTransposeRawKernel<T, Context>(ctx,
                                     x,
                                     filter,
                                     strides,
                                     paddings,
                                     padding_algorithm,
                                     groups,
                                     dilations,
                                     data_format,
                                     out);
}

}  // namespace phi

PD_REGISTER_KERNEL(conv2d_transpose,
                   CPU,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(conv3d_transpose,
                   CPU,
                   ALL_LAYOUT,
                   phi::Conv3dTransposeKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(depthwise_conv2d_transpose,
                   CPU,
                   ALL_LAYOUT,
                   phi::DepthwiseConv2dTransposeKernel,
                   float,
                   double) {}
paddle/phi/kernels/funcs/slice.h  (View file @ 1eb96eec)
...
@@ -123,5 +123,56 @@ DenseTensor Slice(const Context& dev_ctx,
  return ret;
}

// Use in conv_transpose kernel
template <typename Context, typename T, size_t D>
static void Slice(const Context& ctx,
                  const DenseTensor* input,
                  DenseTensor* out,
                  const std::vector<int64_t>& begin_vec,
                  const std::vector<int64_t>& end_vec,
                  const std::vector<int64_t>& axes_vec) {
  auto& place = *ctx.eigen_device();
  auto in_dims = input->dims();
  auto offsets = Eigen::DSizes<Eigen::DenseIndex, D>();
  auto extents = Eigen::DSizes<Eigen::DenseIndex, D>();
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = in_dims[i];
  }

  std::vector<int64_t> out_shape_vec = vectorize(in_dims);
  for (size_t i = 0; i < axes_vec.size(); ++i) {
    offsets[axes_vec[i]] = begin_vec[i];
    extents[axes_vec[i]] = end_vec[i] - begin_vec[i];
    out_shape_vec[axes_vec[i]] = end_vec[i] - begin_vec[i];
  }

  DDim out_dims(make_ddim(out_shape_vec));
  out->Resize(out_dims);
  ctx.template Alloc<T>(out);

  auto in_t =
      EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(*input);
  auto out_t =
      EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(*out,
                                                                  out_dims);

  funcs::EigenSlice<std::decay_t<decltype(place)>, T, D>::Eval(
      place, out_t, in_t, offsets, extents);
  out->Resize(out_dims);
}

template <typename Context, typename T, size_t D>
static void Slice(const Context& ctx,
                  const DenseTensor* input,
                  DenseTensor* out,
                  int64_t begin_idx,
                  int64_t end_idx,
                  int64_t axes) {
  std::vector<int64_t> begin_vec = {begin_idx};
  std::vector<int64_t> end_vec = {end_idx};
  std::vector<int64_t> axes_vec = {axes};
  Slice<Context, T, D>(ctx, input, out, begin_vec, end_vec, axes_vec);
}

}  // namespace funcs
}  // namespace phi
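A minimal usage sketch for the new Slice overloads, assuming a live Context `ctx` and an allocated 4-D DenseTensor `in` (the variable names here are hypothetical):

    // Keep elements [1, 3) along axis 2 of a 4-D tensor:
    phi::DenseTensor out;
    phi::funcs::Slice<phi::GPUContext, float, 4>(ctx, &in, &out, 1, 3, 2);
    // Equivalent call through the multi-axis overload:
    phi::funcs::Slice<phi::GPUContext, float, 4>(ctx, &in, &out, {1}, {3}, {2});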
paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu  0 → 100644  (View file @ 1eb96eec)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/conv_transpose_grad_kernel.h"
#include "paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/depthwise_conv.h"
namespace phi {

template <typename T, typename Context>
void Conv2dTransposeDoubleGradKernel(const Context& ctx,
                                     const DenseTensor& x,
                                     const DenseTensor& filter,
                                     const DenseTensor& dout,
                                     const DenseTensor& ddx,
                                     const DenseTensor& ddfilter,
                                     const std::vector<int>& strides,
                                     const std::vector<int>& paddings,
                                     const std::vector<int>& output_padding,
                                     const std::vector<int>& output_size,
                                     const std::string& padding_algorithm,
                                     int groups,
                                     const std::vector<int>& dilations,
                                     const std::string& data_format,
                                     DenseTensor* dx,
                                     DenseTensor* dfilter,
                                     DenseTensor* ddout) {
  ConvTransposeGradRawKernel<T, Context>(ctx,
                                         x,
                                         filter,
                                         dout,
                                         strides,
                                         paddings,
                                         padding_algorithm,
                                         groups,
                                         dilations,
                                         data_format,
                                         dx,
                                         dfilter);
}

template <typename T, typename Context>
void DepthwiseConv2dTransposeGradKernel(const Context& ctx,
                                        const DenseTensor& x,
                                        const DenseTensor& filter,
                                        const DenseTensor& dout,
                                        const std::vector<int>& strides,
                                        const std::vector<int>& paddings,
                                        const std::vector<int>& output_padding,
                                        const std::vector<int>& output_size,
                                        const std::string& padding_algorithm,
                                        int groups,
                                        const std::vector<int>& dilations,
                                        const std::string& data_format,
                                        DenseTensor* dx,
                                        DenseTensor* dfilter) {
  const DataLayout data_layout =
      paddle::framework::StringToDataLayout(data_format);
  DenseTensor filter_ = filter;
  if (!dx && !dfilter) {
    return;
  }

  std::vector<int> paddings_ = paddings;
  std::vector<int> dilations_ = dilations;

  auto x_dims = x.dims();
  auto filter_dims = filter_.dims();

  DDim in_data_dims;
  if (data_layout != DataLayout::kNHWC) {
    in_data_dims = slice_ddim(x_dims, 2, x_dims.size());
  } else {
    in_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
  }
  DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(
      &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize);

  if (dx) {
    paddle::operators::math::DepthwiseConvFunctor<Context, T> depthwiseConv;
    depthwiseConv(
        ctx,
        dout,
        filter_,
        strides,
        std::vector<int>{
            paddings_[0], paddings_[2], paddings_[1], paddings_[3]},
        dilations_,
        dx,
        data_layout);
  }

  if (dfilter) {
    funcs::SetConstant<Context, T> set_zero;
    ctx.template Alloc<T>(dfilter);
    set_zero(ctx, dfilter, static_cast<T>(0));

    paddle::operators::math::DepthwiseConvFilterGradFunctor<Context, T>
        depthwiseConvFilterGrad;
    depthwiseConvFilterGrad(
        ctx,
        dout,
        x,
        strides,
        std::vector<int>{
            paddings_[0], paddings_[2], paddings_[1], paddings_[3]},
        dilations_,
        dfilter,
        data_layout);
  }
}

}  // namespace phi

PD_REGISTER_KERNEL(conv2d_transpose_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeGradKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(conv2d_transpose_grad_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeDoubleGradKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(conv3d_transpose_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::Conv3dTransposeGradKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(depthwise_conv2d_transpose_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::DepthwiseConv2dTransposeGradKernel,
                   float,
                   double) {}
paddle/phi/kernels/gpu/conv_transpose_kernel.cu  0 → 100644  (View file @ 1eb96eec)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/conv_transpose_kernel.h"
#include "paddle/phi/kernels/impl/conv_transpose_kernel_impl.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/depthwise_conv.h"
namespace phi {

template <typename T, typename Context>
void DepthwiseConv2dTransposeKernel(const Context& ctx,
                                    const DenseTensor& x,
                                    const DenseTensor& filter,
                                    const std::vector<int>& strides,
                                    const std::vector<int>& paddings,
                                    const std::vector<int>& output_padding,
                                    const std::vector<int>& output_size,
                                    const std::string& padding_algorithm,
                                    int groups,
                                    const std::vector<int>& dilations,
                                    const std::string& data_format,
                                    DenseTensor* out) {
  const DataLayout data_layout =
      paddle::framework::StringToDataLayout(data_format);
  DenseTensor filter_ = filter;
  ctx.template Alloc<T>(out);

  PADDLE_ENFORCE_EQ(
      groups,
      filter_.dims()[0],
      errors::InvalidArgument(
          "groups should be error to the 1st dimension of filter_. But "
          "received groups is %d and filter dimension[0] is %d",
          groups,
          filter_.dims()[0]));

  std::vector<int> paddings_ = paddings;
  std::vector<int> dilations_ = dilations;

  for (auto v : dilations_) {
    PADDLE_ENFORCE_EQ(
        v,
        1,
        errors::InvalidArgument("dilations should be 1 in depthwise conv. "
                                "But received dilations is %d",
                                v));
  }

  auto x_dims = x.dims();
  auto filter_dims = filter_.dims();

  DDim in_data_dims;
  if (data_layout != DataLayout::kNHWC) {
    in_data_dims = slice_ddim(x_dims, 2, x_dims.size());
  } else {
    in_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
  }
  DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(
      &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize);

  ctx.template Alloc<T>(out);

  funcs::SetConstant<Context, T> set_zero;
  set_zero(ctx, out, static_cast<T>(0));

  paddle::operators::math::DepthwiseConvInputGradFunctor<Context, T>
      depthwiseConvInputGrad;
  depthwiseConvInputGrad(
      ctx,
      *out,
      filter,
      x,
      strides,
      std::vector<int>{paddings_[0], paddings_[2], paddings_[1], paddings_[3]},
      dilations_,
      out,
      data_layout);
}

}  // namespace phi

PD_REGISTER_KERNEL(conv2d_transpose,
                   GPU,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(conv3d_transpose,
                   GPU,
                   ALL_LAYOUT,
                   phi::Conv3dTransposeKernel,
                   float,
                   double) {}
PD_REGISTER_KERNEL(depthwise_conv2d_transpose,
                   GPU,
                   ALL_LAYOUT,
                   phi::DepthwiseConv2dTransposeKernel,
                   float,
                   double) {}
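The forward kernel above delegates to DepthwiseConvInputGradFunctor because the forward pass of a transposed convolution is exactly the backward-data pass of an ordinary convolution. A self-contained 1-D illustration of that identity (stride 1, no padding; my example, not patch code):

    #include <cstdio>
    #include <vector>

    // Transposed convolution as scatter-add: each input element adds a scaled
    // copy of the kernel into the output, which is precisely what the
    // backward-data pass of a convolution computes.
    std::vector<float> conv_transpose_1d(const std::vector<float>& x,
                                         const std::vector<float>& k) {
      std::vector<float> y(x.size() + k.size() - 1, 0.f);
      for (size_t i = 0; i < x.size(); ++i)
        for (size_t j = 0; j < k.size(); ++j)
          y[i + j] += x[i] * k[j];
      return y;
    }

    int main() {
      for (float v : conv_transpose_1d({1, 2}, {1, 1, 1}))
        std::printf("%g ", v);  // prints: 1 3 3 2
    }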
paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu  0 → 100644  (View file @ 1eb96eec)
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/conv_transpose_grad_kernel.h"
#include <algorithm>
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/funcs/slice.h"
#include "paddle/phi/kernels/transpose_kernel.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h"
#endif
namespace phi {

using GPUDNNDataLayout = paddle::platform::DataLayout;

template <typename T, typename Context>
void ConvTransposeGradRawGPUDNNKernel(const Context& ctx,
                                      const DenseTensor& x,
                                      const DenseTensor& filter,
                                      const DenseTensor& dout,
                                      const std::vector<int>& strides,
                                      const std::vector<int>& paddings,
                                      const std::string& padding_algorithm,
                                      int groups,
                                      const std::vector<int>& dilations,
                                      const std::string& data_format,
                                      DenseTensor* dx,
                                      DenseTensor* dfilter) {
  const T* filter_data = filter.data<T>();
  std::vector<int> paddings_ = paddings;
  // cudnn v5 does not support dilations
  std::vector<int> dilations_ = dilations;
  const GPUDNNDataLayout data_layout =
      (data_format != "NHWC" ? GPUDNNDataLayout::kNCHW
                             : GPUDNNDataLayout::kNHWC);

  // if channel_last, transpose to channel_first
  DenseTensor x_transpose;
  DenseTensor dout_transpose;
  std::vector<int> x_vec = vectorize<int>(x.dims());
  std::vector<int> out_vec = vectorize<int>(dout.dims());
  if (data_layout == GPUDNNDataLayout::kNHWC) {
    if (strides.size() == 2U) {
      std::vector<int> axis = {0, 3, 1, 2};
      for (size_t i = 0; i < axis.size(); ++i) {
        x_vec[i] = x.dims()[axis[i]];
        out_vec[i] = dout.dims()[axis[i]];
      }
      x_transpose = Transpose<T, Context>(ctx, x, axis);
      dout_transpose = Transpose<T, Context>(ctx, dout, axis);
    } else if (strides.size() == 3U) {
      std::vector<int> axis = {0, 4, 1, 2, 3};
      for (size_t i = 0; i < axis.size(); ++i) {
        x_vec[i] = x.dims()[axis[i]];
        out_vec[i] = dout.dims()[axis[i]];
      }
      x_transpose = Transpose<T, Context>(ctx, x, axis);
      dout_transpose = Transpose<T, Context>(ctx, dout, axis);
    }
  } else {
    x_transpose = x;
    dout_transpose = dout;
  }

  // update padding and dilation
  auto x_dims = x_transpose.dims();
  auto filter_dims = filter.dims();
  DDim x_data_dims;
  x_data_dims = slice_ddim(x_dims, 2, x_dims.size());
  DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(
      &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize);

  int data_dim = strides.size();  // 2d or 3d
  bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim);

  std::vector<int> x_pad(x_dims.size() * 2, 0);
  DenseTensor transformed_dout;
  std::vector<int> padding_common(data_dim, 0);
  if (!is_sys_pad) {
    std::vector<int> padding_diff(data_dim);
    std::vector<int> new_dout_shape_vec(data_dim + 2);
    new_dout_shape_vec[0] = dout_transpose.dims()[0];
    new_dout_shape_vec[1] = dout_transpose.dims()[1];

    for (size_t i = 0; i < data_dim; ++i) {
      padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]);
      padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]);
      new_dout_shape_vec[i + 2] =
          dout_transpose.dims()[i + 2] + padding_diff[i];
      x_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i];
      x_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i];
    }

    transformed_dout.Resize(make_ddim(new_dout_shape_vec));
    ctx.template Alloc<T>(&transformed_dout);

    const int rank = x_transpose.dims().size();
    T pad_value(0.0);
    switch (rank) {
      case 4: {
        funcs::PadFunction<Context, T, 4>(
            ctx, x_pad, dout_transpose, pad_value, &transformed_dout);
      } break;
      case 5: {
        funcs::PadFunction<Context, T, 5>(
            ctx, x_pad, dout_transpose, pad_value, &transformed_dout);
      } break;
      default:
        PADDLE_THROW(errors::InvalidArgument(
            "Op(ConvTranspose) only supports 4-D or 5-D x DenseTensor."));
    }
  } else {
    transformed_dout = dout_transpose;
    if (paddings_.size() == data_dim) {
      for (size_t i = 0; i < data_dim; ++i) {
        padding_common[i] = paddings_[i];
      }
    } else {
      for (size_t i = 0; i < data_dim; ++i) {
        padding_common[i] = paddings_[2 * i];
      }
    }
  }

  const T* x_data = x_transpose.data<T>();
  const T* dout_data = transformed_dout.data<T>();
  out_vec = vectorize<int>(transformed_dout.dims());

  // ------------------- cudnn descriptors ---------------------
  GPUDNNDataLayout layout;

  if (strides.size() == 2U) {
    layout = GPUDNNDataLayout::kNCHW;
  } else {
    layout = GPUDNNDataLayout::kNCDHW;
  }

  int iwo_groups = groups;
  int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
  iwo_groups = 1;
  c_groups = groups;
  groups = 1;
#endif

  auto dtype = paddle::platform::CudnnDataType<T>::type;

  paddle::operators::ConvArgs args1{&transformed_dout,
                                    &filter,
                                    &x_transpose,
                                    strides,
                                    padding_common,
                                    dilations_,
                                    dtype};
  paddle::operators::ConvArgs args2{&transformed_dout,
                                    &filter,
                                    &x_transpose,
                                    strides,
                                    padding_common,
                                    dilations_,
                                    dtype};

#ifdef PADDLE_WITH_HIP
  miopenConvFwdAlgorithm_t data_algo{};
  miopenConvBwdWeightsAlgorithm_t filter_algo{};
#else
  cudnnConvolutionFwdAlgo_t data_algo{};
  cudnnConvolutionBwdFilterAlgo_t filter_algo{};
#endif

  auto layout_tensor = paddle::platform::GetCudnnTensorFormat(layout);
  size_t workspace_size = 0;
  auto handle = ctx.cudnn_handle();
  bool deterministic = FLAGS_cudnn_deterministic;
  T* dx_data = nullptr;
  T* dfilter_data = nullptr;

  if (dx) {
    dx_data = ctx.template Alloc<T>(dx);
    args1.handle = handle;
    args1.idesc.set(transformed_dout, iwo_groups);
    args1.wdesc.set(filter, layout_tensor, iwo_groups);
    args1.odesc.set(x_transpose, iwo_groups);
    args1.cdesc.set(dtype,
                    padding_common,
                    strides,
                    dilations_,
                    paddle::platform::AllowTF32Cudnn(),
                    c_groups);
#ifdef PADDLE_WITH_HIP
    using search1 =
        paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
    workspace_size = std::max(workspace_size, search1::GetWorkspaceSize(args1));
    data_algo =
        search1::Find<T>(args1, false, deterministic, workspace_size, ctx);
#else
    using search1 =
        paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
    data_algo = search1::Find<T>(args1, false, deterministic, ctx);
    workspace_size =
        std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
#endif
  }

  if (dfilter) {
    dfilter_data = ctx.template Alloc<T>(dfilter);
    args2.handle = handle;
    args2.idesc.set(transformed_dout, iwo_groups);
    args2.wdesc.set(*dfilter, layout_tensor, iwo_groups);
    args2.odesc.set(x_transpose, iwo_groups);
    args2.cdesc.set(dtype,
                    padding_common,
                    strides,
                    dilations_,
                    paddle::platform::AllowTF32Cudnn(),
                    c_groups);
#ifdef PADDLE_WITH_HIP
    using search2 =
        paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
    workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2));
    filter_algo =
        search2::Find<T>(args2, false, deterministic, workspace_size, ctx);
#else
    using search2 =
        paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
    filter_algo = search2::Find<T>(args2, false, deterministic, ctx);
    workspace_size =
        std::max(workspace_size, search2::GetWorkspaceSize(args2, filter_algo));
#endif
  }

  // ------------------- cudnn conv backward data ---------------------
  // FIxME(typhoonzero): template type T may not be the same as cudnn call.
  int x_offset = x.numel() / x.dims()[0] / groups;
  int dout_offset =
      transformed_dout.numel() / transformed_dout.dims()[0] / groups;
  int filter_offset = filter.numel() / groups;
  paddle::operators::ScalingParamType<T> alpha = 1.0f;
  paddle::operators::ScalingParamType<T> beta = 0.0f;
  auto workspace_handle = ctx.cudnn_workspace_handle();
  if (dx) {
    // Because beta is zero, it is unnecessary to reset dx.
    for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
      auto cudnn_func = [&](void* cudnn_workspace) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            dynload::miopenConvolutionForward(handle,
                                              &alpha,
                                              args1.idesc.desc(),
                                              dout_data + dout_offset * g,
                                              args1.wdesc.desc(),
                                              filter_data + filter_offset * g,
                                              args1.cdesc.desc(),
                                              data_algo,
                                              &beta,
                                              args1.odesc.desc(),
                                              dx_data + x_offset * g,
                                              cudnn_workspace,
                                              workspace_size));
      };
#else   // PADDLE_WITH_HIP
      auto cudnn_func = [&](void* cudnn_workspace) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            dynload::cudnnConvolutionForward(handle,
                                             &alpha,
                                             args1.idesc.desc(),
                                             dout_data + dout_offset * g,
                                             args1.wdesc.desc(),
                                             filter_data + filter_offset * g,
                                             args1.cdesc.desc(),
                                             data_algo,
                                             cudnn_workspace,
                                             workspace_size,
                                             &beta,
                                             args1.odesc.desc(),
                                             dx_data + x_offset * g));
      };
#endif  // PADDLE_WITH_HIP
      workspace_handle.RunFunc(cudnn_func, workspace_size);
    }

    if (data_layout == GPUDNNDataLayout::kNHWC) {
      DenseTensor dx_transpose;
      DenseTensor dx_nchw;
      dx_nchw.ShareDataWith(*dx);
      dx_nchw.Resize(make_ddim(x_vec));
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 2, 3, 1};
        dx_transpose = Transpose<T, Context>(ctx, dx_nchw, axis);
        *dx = dx_transpose;
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 2, 3, 4, 1};
        dx_transpose = Transpose<T, Context>(ctx, dx_nchw, axis);
        *dx = dx_transpose;
      }
    }
  }

  // ------------------- cudnn conv backward filter ---------------------
  if (dfilter) {
    // Because beta is zero, it is unnecessary to reset dfilter.
    // Gradient with respect to the filter
    for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
      auto cudnn_func = [&](void* cudnn_workspace) {
        PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardWeights(
            handle,
            &alpha,
            args2.odesc.desc(),
            x_data + x_offset * g,
            args2.idesc.desc(),
            dout_data + dout_offset * g,
            args2.cdesc.desc(),
            filter_algo,
            &beta,
            args2.wdesc.desc(),
            dfilter_data + filter_offset * g,
            cudnn_workspace,
            workspace_size));
      };
#else   // PADDLE_WITH_HIP
      auto cudnn_func = [&](void* cudnn_workspace) {
        PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter(
            handle,
            &alpha,
            args2.idesc.desc(),
            dout_data + dout_offset * g,
            args2.odesc.desc(),
            x_data + x_offset * g,
            args2.cdesc.desc(),
            filter_algo,
            cudnn_workspace,
            workspace_size,
            &beta,
            args2.wdesc.desc(),
            dfilter_data + filter_offset * g));
      };
#endif  // PADDLE_WITH_HIP
      workspace_handle.RunFunc(cudnn_func, workspace_size);
    }
  }
}
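One detail worth calling out in the kernel above: cuDNN/MIOpen descriptors take a single padding value per spatial dimension, so asymmetric paddings are normalized first. A small sketch of the arithmetic with assumed example values:

    // For pad_before = 1, pad_after = 2 on one spatial dimension:
    int pad_before = 1, pad_after = 2;
    int common = std::min(pad_before, pad_after);  // 1, handed to the descriptor
    int diff = std::abs(pad_before - pad_after);   // 1, applied by PadFunction
    // The tensor is zero-padded by `diff` on the appropriate side up front,
    // and the extra elements are sliced away from the result afterwards.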
template <typename T, typename Context>
void Conv2dTransposeGradGPUDNNKernel(const Context& ctx,
                                     const DenseTensor& x,
                                     const DenseTensor& filter,
                                     const DenseTensor& dout,
                                     const std::vector<int>& strides,
                                     const std::vector<int>& paddings_,
                                     const std::vector<int>& output_padding,
                                     const std::vector<int>& output_size,
                                     const std::string& padding_algorithm,
                                     int groups,
                                     const std::vector<int>& dilations_,
                                     const std::string& data_format,
                                     DenseTensor* dx,
                                     DenseTensor* dfilter) {
  ConvTransposeGradRawGPUDNNKernel<T, Context>(ctx,
                                               x,
                                               filter,
                                               dout,
                                               strides,
                                               paddings_,
                                               padding_algorithm,
                                               groups,
                                               dilations_,
                                               data_format,
                                               dx,
                                               dfilter);
}
/*
* Inputs: I, filter, dout, ddI, ddfilter
* Outputs: ddout, dfilter, dI
* ddo = conv_bp_data(filter, ddI) + conv_bp_data(ddfilter, I)
* dfilter = conv_bp_filter(dout, ddI)
* dI = conv(dout, ddfilter)
*/
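// Reading aid (not in the original source): conv_transpose(I, W) is bilinear
// in I and W, so a joint first-order perturbation (ddI, ddfilter) gives
//   ddout = conv_transpose(ddI, W) + conv_transpose(I, ddfilter),
// and since conv_transpose is the backward-data pass of a convolution, each
// term below is evaluated with a cuDNN/MIOpen backward-data call
// (args1/args2) before the two results are summed into ddout.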
template <typename T, typename Context>
void Conv2dTransposeDoubleGradGPUDNNKernel(
    const Context& ctx,
    const DenseTensor& x,
    const DenseTensor& filter,
    const DenseTensor& dout,
    const DenseTensor& ddx,
    const DenseTensor& ddfilter,
    const std::vector<int>& strides,
    const std::vector<int>& paddings,
    const std::vector<int>& output_padding,
    const std::vector<int>& output_size,
    const std::string& padding_algorithm,
    int groups,
    const std::vector<int>& dilations,
    const std::string& data_format,
    DenseTensor* dx,
    DenseTensor* dfilter,
    DenseTensor* ddout) {
  if (dx) {
    ctx.template Alloc<T>(dx);
  }
  if (dfilter) {
    ctx.template Alloc<T>(dfilter);
  }
  if (ddout) {
    ctx.template Alloc<T>(ddout);
    funcs::SetConstant<Context, T> set_zero;
    set_zero(ctx, ddout, static_cast<T>(0));
  }

  const T* filter_ = filter.data<T>();
  const T* dout_ = dout.data<T>();
  const T* ddx_ = nullptr;
  const T* ddfilter_ = nullptr;
  T* dx_ = nullptr;
  T* dfilter_ = nullptr;
  T* ddout_ = nullptr;
  T* transformed_dx_ = nullptr;
  std::vector<int> paddings_ = paddings;
  std::vector<int> dilations_ = dilations;

  bool deterministic = FLAGS_cudnn_deterministic;

  const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");

  // transform DenseTensors to channel first-----------
  DenseTensor transformed_x_channel(x.type());
  DenseTensor transformed_dout_channel(dout.type());
  DenseTensor transformed_ddx_channel(x.type());

  DenseTensor transformed_dx_channel(x.type());
  DenseTensor transformed_ddout_channel(dout.type());

  if (channel_last) {
    ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x_channel);
    TransToChannelFirst<Context, T>(ctx, &x, &transformed_x_channel);

    ResizeToChannelFirst<Context, T>(ctx, &dout, &transformed_dout_channel);
    TransToChannelFirst<Context, T>(ctx, &dout, &transformed_dout_channel);

    ResizeToChannelFirst<Context, T>(ctx, &ddx, &transformed_ddx_channel);
    TransToChannelFirst<Context, T>(ctx, &ddx, &transformed_ddx_channel);

    if (dx) {
      ResizeToChannelFirst<Context, T>(ctx, dx, &transformed_dx_channel);
      ctx.template Alloc<T>(&transformed_dx_channel);
    }
    if (ddout) {
      ResizeToChannelFirst<Context, T>(ctx, ddout, &transformed_ddout_channel);
    }
  } else {
    transformed_x_channel = x;
    transformed_dout_channel = dout;
    transformed_ddx_channel = ddx;

    if (dx) {
      transformed_dx_channel = *dx;
    }
  }
  std::vector<int> out_vec = vectorize<int>(transformed_dout_channel.dims());

  auto x_dims = transformed_x_channel.dims();
  auto filter_dims = filter.dims();
  DDim x_data_dims = slice_ddim(x_dims, 2, x_dims.size());
  DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(
      &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize);

  int data_dim = strides.size();  // 2d or 3d
  bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim);
  DenseTensor transformed_x(x.type());
  DenseTensor transformed_ddx(x.type());

  DenseTensor transformed_dout(dout.type());

  std::vector<int> padding_common(data_dim, 0);
  std::vector<int> input_pad(x.dims().size() * 2, 0);

  if (!is_sys_pad) {
    // get pad
    std::vector<int> padding_diff(data_dim);
    std::vector<int> new_input_shape_vec(data_dim + 2);
    std::vector<int> new_output_grad_shape_vec(data_dim + 2);

    new_input_shape_vec[0] = transformed_x_channel.dims()[0];
    new_input_shape_vec[1] = transformed_x_channel.dims()[1];
    new_output_grad_shape_vec[0] = transformed_dout_channel.dims()[0];
    new_output_grad_shape_vec[1] = transformed_dout_channel.dims()[1];

    for (size_t i = 0; i < data_dim; ++i) {
      padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]);
      padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]);
      new_input_shape_vec[i + 2] =
          transformed_x_channel.dims()[i + 2] + padding_diff[i];

      new_output_grad_shape_vec[i + 2] =
          transformed_dout_channel.dims()[i + 2] + padding_diff[i];

      input_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i];
      input_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i];
    }
    DDim new_input_shape(make_ddim(new_input_shape_vec));
    transformed_x.Resize(new_input_shape);
    transformed_ddx.Resize(new_input_shape);
    transformed_dout.Resize(make_ddim(new_output_grad_shape_vec));

    ctx.template Alloc<T>(&transformed_x);
    ctx.template Alloc<T>(&transformed_ddx);
    ctx.template Alloc<T>(&transformed_dout);

    // pad for input
    const int rank = x.dims().size();
    T pad_value(0.0);
    switch (rank) {
      case 4: {
        funcs::PadFunction<Context, T, 4>(
            ctx, input_pad, transformed_x_channel, pad_value, &transformed_x);
        funcs::PadFunction<Context, T, 4>(ctx,
                                          input_pad,
                                          transformed_dout_channel,
                                          pad_value,
                                          &transformed_dout);
        funcs::PadFunction<Context, T, 4>(ctx,
                                          input_pad,
                                          transformed_ddx_channel,
                                          pad_value,
                                          &transformed_ddx);
      } break;
      case 5: {
        funcs::PadFunction<Context, T, 5>(
            ctx, input_pad, transformed_x_channel, pad_value, &transformed_x);
        funcs::PadFunction<Context, T, 5>(ctx,
                                          input_pad,
                                          transformed_ddx_channel,
                                          pad_value,
                                          &transformed_ddx);
      } break;
      default:
        PADDLE_THROW(errors::InvalidArgument(
            "ConvOp only support tensors with 4 or 5 dimensions."));
    }
  } else {
    transformed_x = transformed_x_channel;
    transformed_dout = transformed_dout_channel;
    transformed_ddx = transformed_ddx_channel;

    if (paddings_.size() == data_dim) {
      for (size_t i = 0; i < data_dim; ++i) {
        padding_common[i] = paddings_[i];
      }
    } else {
      for (size_t i = 0; i < data_dim; ++i) {
        padding_common[i] = paddings_[2 * i];
      }
    }
  }

  std::vector<int64_t> starts(data_dim, 0);
  std::vector<int64_t> ends(data_dim, 0);
  std::vector<int64_t> axes(data_dim, 0);
  for (size_t i = 0; i < data_dim; ++i) {
    starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
    ends[i] = starts[i] + out_vec[i + 2];
    axes[i] = i + 2;
  }

  std::vector<int> transformed_out_vec = out_vec;
  for (size_t i = 0; i < data_dim; ++i) {
    transformed_out_vec[i + 2] =
        out_vec[i + 2] +
        (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
        2 * padding_common[i] + paddings_[2 * i] + paddings_[2 * i + 1];
  }

  if (!is_sys_pad) {
    transformed_ddout_channel.Resize(make_ddim(transformed_out_vec));
    ctx.template Alloc<T>(&transformed_ddout_channel);
  } else {
    ctx.template Alloc<T>(ddout);
    transformed_ddout_channel = *ddout;
    transformed_ddout_channel.Resize(make_ddim(transformed_out_vec));
  }

  const T* x_ = transformed_x.data<T>();

  int iwo_group = groups;
  int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
  iwo_group = 1;
  c_group = groups;
  groups = 1;
#endif
  auto dtype = paddle::platform::CudnnDataType<T>::type;

  auto handle = ctx.cudnn_handle();

  paddle::operators::ConvArgs args1{&transformed_ddout_channel,
                                    &filter,
                                    &transformed_ddx,
                                    strides,
                                    padding_common,
                                    dilations_,
                                    dtype};
  paddle::operators::ConvArgs args2{&transformed_ddout_channel,
                                    &ddfilter,
                                    &transformed_x,
                                    strides,
                                    padding_common,
                                    dilations_,
                                    dtype};

  paddle::operators::ConvArgs args3{&transformed_dout,
                                    dfilter,
                                    &transformed_ddx_channel,
                                    strides,
                                    padding_common,
                                    dilations_,
                                    dtype};
  paddle::operators::ConvArgs args4{&transformed_dout,
                                    &ddfilter,
                                    &transformed_dx_channel,
                                    strides,
                                    padding_common,
                                    dilations_,
                                    dtype};
#ifdef PADDLE_WITH_HIP
  miopenConvBwdDataAlgorithm_t bwd_algo1 =
      static_cast<miopenConvBwdDataAlgorithm_t>(0);
  miopenConvBwdDataAlgorithm_t bwd_algo2 =
      static_cast<miopenConvBwdDataAlgorithm_t>(0);
  miopenConvFwdAlgorithm_t data_algo =
      static_cast<miopenConvFwdAlgorithm_t>(0);
  miopenConvBwdWeightsAlgorithm_t filter_algo =
      static_cast<miopenConvBwdWeightsAlgorithm_t>(0);
#else
  cudnnConvolutionBwdDataAlgo_t bwd_algo1 =
      static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
  cudnnConvolutionBwdDataAlgo_t bwd_algo2 =
      static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
  cudnnConvolutionFwdAlgo_t data_algo =
      static_cast<cudnnConvolutionFwdAlgo_t>(0);
  cudnnConvolutionBwdFilterAlgo_t filter_algo =
      static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
#endif

  auto layout = paddle::platform::GetCudnnTensorFormat(GPUDNNDataLayout::kNCHW);

  // ddo = conv(ddI, filter) + conv(I, ddfilter)
  size_t workspace_size = 0;

  T* transformed_ddout_channel_ = nullptr;

  if (ddout) {
    ddout_ = ddout->data<T>();
    transformed_ddout_channel_ = transformed_ddout_channel.data<T>();

    args1.handle = handle;
    args1.idesc.set(transformed_ddout_channel, iwo_group);
    args1.wdesc.set(filter, layout, iwo_group);
    args1.odesc.set(transformed_ddx, iwo_group);
    args1.cdesc.set(dtype,
                    padding_common,
                    strides,
                    dilations_,
                    paddle::platform::AllowTF32Cudnn(),
                    c_group);
#ifdef PADDLE_WITH_HIP
    using search1 =
        paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
    workspace_size = search1::GetWorkspaceSize(args1);
    bwd_algo1 =
        search1::Find<T>(args1, false, deterministic, workspace_size, ctx);
#else
    using search1 =
        paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
    bwd_algo1 = search1::Find<T>(args1, false, deterministic, ctx);
    workspace_size = search1::GetWorkspaceSize(args1, bwd_algo1);
#endif

    ddfilter_ = ddfilter.data<T>();
    args2.handle = handle;
    args2.idesc.set(transformed_ddout_channel, iwo_group);
    args2.wdesc.set(ddfilter, layout, iwo_group);
    args2.odesc.set(transformed_x, iwo_group);
    args2.cdesc.set(dtype,
                    padding_common,
                    strides,
                    dilations_,
                    paddle::platform::AllowTF32Cudnn(),
                    c_group);
#ifdef PADDLE_WITH_HIP
    using search2 =
        paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
    workspace_size =
        std::max(workspace_size, search2::GetWorkspaceSize(args2));
    bwd_algo2 =
        search2::Find<T>(args2, false, deterministic, workspace_size, ctx);
#else
    using search2 =
        paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
    bwd_algo2 = search2::Find<T>(args2, false, deterministic, ctx);
    workspace_size =
        std::max(workspace_size, search2::GetWorkspaceSize(args2, bwd_algo2));
#endif
  }

  if (dfilter) {
    dfilter_ = dfilter->data<T>();

    args3.handle = handle;
    args3.idesc.set(transformed_dout, iwo_group);
    args3.wdesc.set(*dfilter, layout, iwo_group);
    args3.odesc.set(transformed_ddx_channel, iwo_group);
    args3.cdesc.set(dtype,
                    padding_common,
                    strides,
                    dilations_,
                    paddle::platform::AllowTF32Cudnn(),
                    c_group);
#ifdef PADDLE_WITH_HIP
    using search3 =
        paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
    workspace_size =
        std::max(workspace_size, search3::GetWorkspaceSize(args3));
    filter_algo =
        search3::Find<T>(args3, false, deterministic, workspace_size, ctx);
#else
    using search3 =
        paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
    filter_algo = search3::Find<T>(args3, false, deterministic, ctx);
    workspace_size =
        std::max(workspace_size, search3::GetWorkspaceSize(args3, filter_algo));
#endif
  }

  if (dx) {
    transformed_dx_ = transformed_dx_channel.data<T>();

    args4.handle = handle;
    args4.idesc.set(transformed_dout, iwo_group);
    args4.wdesc.set(ddfilter, layout, iwo_group);
    args4.odesc.set(transformed_dx_channel, iwo_group);
    args4.cdesc.set(dtype,
                    padding_common,
                    strides,
                    dilations_,
                    paddle::platform::AllowTF32Cudnn(),
                    c_group);
#ifdef PADDLE_WITH_HIP
    using search4 =
        paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
    workspace_size =
        std::max(workspace_size, search4::GetWorkspaceSize(args4));
    data_algo =
        search4::Find<T>(args4, false, deterministic, workspace_size, ctx);
#else
    using search4 =
        paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
    data_algo = search4::Find<T>(args4, false, deterministic, ctx);
    workspace_size =
        std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
#endif
  }

  int i_n, i_c, i_d, i_h, i_w;
  paddle::operators::GetNCDHW(transformed_x.dims(),
                              GPUDNNDataLayout::kNCHW,
                              &i_n,
                              &i_c,
                              &i_d,
                              &i_h,
                              &i_w);

  int o_n, o_c, o_d, o_h, o_w;
  paddle::operators::GetNCDHW(transformed_dout.dims(),
                              GPUDNNDataLayout::kNCHW,
                              &o_n,
                              &o_c,
                              &o_d,
                              &o_h,
                              &o_w);

  int group_offset_in =
      transformed_x.numel() / transformed_x.dims()[0] / groups;
  int group_offset_out =
      transformed_dout.numel() / transformed_dout.dims()[0] / groups;
  int group_offset_filter = filter.numel() / groups;

  paddle::operators::ScalingParamType<T> alpha = 1.0f;
  paddle::operators::ScalingParamType<T> beta = 0.0f;

  auto wkspace_handle = ctx.cudnn_workspace_handle();

  if (ddout) {
    ddx_ = transformed_ddx.data<T>();
    for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
      wkspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData(
                handle,
                &alpha,
                args1.odesc.desc(),
                ddx_ + i * group_offset_in,
                args1.wdesc.desc(),
                filter_ + i * group_offset_filter,
                args1.cdesc.desc(),
                bwd_algo1,
                &beta,
                args1.idesc.desc(),
                transformed_ddout_channel_ + i * group_offset_out,
                workspace_ptr,
                workspace_size));
          },
          workspace_size);
#else   // PADDLE_WITH_HIP
      wkspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData(
                handle,
                &alpha,
                args1.wdesc.desc(),
                filter_ + i * group_offset_filter,
                args1.odesc.desc(),
                ddx_ + i * group_offset_in,
                args1.cdesc.desc(),
                bwd_algo1,
                workspace_ptr,
                workspace_size,
                &beta,
                args1.idesc.desc(),
                transformed_ddout_channel_ + i * group_offset_out));
          },
          workspace_size);
#endif  // PADDLE_WITH_HIP
    }

    for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
      // MIOPEN ONLY support beta to be 0.0f
      DenseTensor conv_x_ddfilter(dout.type());
      conv_x_ddfilter.Resize(transformed_ddout_channel.dims());
      T* conv_x_ddfilter_data = ctx.template Alloc<T>(&conv_x_ddfilter);
      wkspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData(
                handle,
                &alpha,
                args2.odesc.desc(),
                x_ + i * group_offset_in,
                args2.wdesc.desc(),
                ddfilter_ + i * group_offset_filter,
                args2.cdesc.desc(),
                bwd_algo2,
                &beta,
                args2.idesc.desc(),
                conv_x_ddfilter_data + i * group_offset_out,
                workspace_ptr,
                workspace_size));
          },
          workspace_size);
      PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenOpTensor(
          handle,
          miopenTensorOpAdd,
          &alpha,
          args2.idesc.desc(),
          transformed_ddout_channel_ + i * group_offset_out,
          &alpha,
          args2.idesc.desc(),
          conv_x_ddfilter_data + i * group_offset_out,
          &beta,
          args2.idesc.desc(),
          transformed_ddout_channel_ + i * group_offset_out));
#else   // PADDLE_WITH_HIP
      wkspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData(
                handle,
                &alpha,
                args2.wdesc.desc(),
                ddfilter_ + i * group_offset_filter,
                args2.odesc.desc(),
                x_ + i * group_offset_in,
                args2.cdesc.desc(),
                bwd_algo2,
                workspace_ptr,
                workspace_size,
                &alpha,
                args2.idesc.desc(),
                transformed_ddout_channel_ + i * group_offset_out));
          },
          workspace_size);
#endif  // PADDLE_WITH_HIP
    }

    if ((!is_sys_pad) && (!channel_last)) {
      if (strides.size() == 2U) {
        funcs::Slice<Context, T, 4>(
            ctx, &transformed_ddout_channel, ddout, starts, ends, axes);
      } else if (!is_sys_pad && strides.size() == 3U) {
        funcs::Slice<Context, T, 5>(
            ctx, &transformed_ddout_channel, ddout, starts, ends, axes);
      }
    } else if ((!is_sys_pad) && (channel_last)) {
      if (strides.size() == 2U) {
        funcs::Slice<Context, T, 4>(ctx,
                                    &transformed_ddout_channel,
                                    &transformed_ddout_channel,
                                    starts,
                                    ends,
                                    axes);
      } else if (!is_sys_pad && strides.size() == 3U) {
        funcs::Slice<Context, T, 5>(ctx,
                                    &transformed_ddout_channel,
                                    &transformed_ddout_channel,
                                    starts,
                                    ends,
                                    axes);
      }

      TransToChannelLast<Context, T>(ctx, &transformed_ddout_channel, ddout);
    }
  }

  T* transformed_dout_channel_ = transformed_dout.data<T>();
  if (dfilter) {
    ddx_ = transformed_ddx_channel.data<T>();
    for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
      wkspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            PADDLE_ENFORCE_GPU_SUCCESS(
                dynload::miopenConvolutionBackwardWeights(
                    handle,
                    &alpha,
                    args3.odesc.desc(),
                    ddx_ + i * group_offset_in,
                    args3.idesc.desc(),
                    transformed_dout_channel_ + i * group_offset_out,
                    args3.cdesc.desc(),
                    filter_algo,
                    &beta,
                    args3.wdesc.desc(),
                    dfilter_ + i * group_offset_filter,
                    workspace_ptr,
                    workspace_size));
          },
          workspace_size);
#else   // PADDLE_WITH_HIP
      wkspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter(
                handle,
                &alpha,
                args3.idesc.desc(),
                transformed_dout_channel_ + i * group_offset_out,
                args3.odesc.desc(),
                ddx_ + i * group_offset_in,
                args3.cdesc.desc(),
                filter_algo,
                workspace_ptr,
                workspace_size,
                &beta,
                args3.wdesc.desc(),
                dfilter_ + i * group_offset_filter));
          },
          workspace_size);
#endif  // PADDLE_WITH_HIP
    }
  }

  if (dx) {
    ddfilter_ = ddfilter.data<T>();
    for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
      wkspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionForward(
                handle,
                &alpha,
                args4.idesc.desc(),
                transformed_dout_channel_ + i * group_offset_out,
                args4.wdesc.desc(),
                ddfilter_ + i * group_offset_filter,
                args4.cdesc.desc(),
                data_algo,
                &beta,
                args4.odesc.desc(),
                transformed_dx_ + i * group_offset_in,
                workspace_ptr,
                workspace_size));
          },
          workspace_size);
#else   // PADDLE_WITH_HIP
      wkspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionForward(
                handle,
                &alpha,
                args4.idesc.desc(),
                transformed_dout_channel_ + i * group_offset_out,
                args4.wdesc.desc(),
                ddfilter_ + i * group_offset_filter,
                args4.cdesc.desc(),
                data_algo,
                workspace_ptr,
                workspace_size,
                &beta,
                args4.odesc.desc(),
                transformed_dx_ + i * group_offset_in));
          },
          workspace_size);
#endif  // PADDLE_WITH_HIP
    }
    if (channel_last) {
      TransToChannelLast<Context, T>(ctx, &transformed_dx_channel, dx);
    }
  }
}
template <typename T, typename Context>
void Conv3dTransposeGradGPUDNNKernel(const Context& ctx,
                                     const DenseTensor& x,
                                     const DenseTensor& filter,
                                     const DenseTensor& dout,
                                     const std::vector<int>& strides,
                                     const std::vector<int>& paddings_,
                                     const std::vector<int>& output_padding,
                                     const std::vector<int>& output_size,
                                     const std::string& padding_algorithm,
                                     int groups,
                                     const std::vector<int>& dilations_,
                                     const std::string& data_format,
                                     DenseTensor* dx,
                                     DenseTensor* dfilter) {
  ConvTransposeGradRawGPUDNNKernel<T, Context>(ctx,
                                               x,
                                               filter,
                                               dout,
                                               strides,
                                               paddings_,
                                               padding_algorithm,
                                               groups,
                                               dilations_,
                                               data_format,
                                               dx,
                                               dfilter);
}

}  // namespace phi
using float16 = phi::dtype::float16;

#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
PD_REGISTER_KERNEL(conv2d_transpose_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeGradGPUDNNKernel,
                   float,
                   float16) {}
PD_REGISTER_KERNEL(conv2d_transpose_grad_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeDoubleGradGPUDNNKernel,
                   float,
                   float16) {}
PD_REGISTER_KERNEL(conv3d_transpose_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3dTransposeGradGPUDNNKernel,
                   float,
                   float16) {}
#else
PD_REGISTER_KERNEL(conv2d_transpose_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeGradGPUDNNKernel,
                   float,
                   double,
                   float16) {}
PD_REGISTER_KERNEL(conv2d_transpose_grad_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeDoubleGradGPUDNNKernel,
                   float,
                   double,
                   float16) {}
PD_REGISTER_KERNEL(conv3d_transpose_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3dTransposeGradGPUDNNKernel,
                   float,
                   double,
                   float16) {}
#endif
paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu  0 → 100644  (View file @ 1eb96eec)
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/conv_transpose_kernel.h"
#include <algorithm>
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/funcs/slice.h"
#include "paddle/phi/kernels/transpose_kernel.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h"
#endif
namespace phi {

using GPUDNNDataLayout = paddle::platform::DataLayout;

template <typename T, typename Context>
void ConvTransposeRawGPUDNNKernel(const Context& ctx,
                                  const DenseTensor& x,
                                  const DenseTensor& filter,
                                  const std::vector<int>& strides,
                                  const std::vector<int>& paddings,
                                  const std::string& padding_algorithm,
                                  int groups,
                                  const std::vector<int>& dilations,
                                  const std::string& data_format,
                                  DenseTensor* out) {
  std::vector<int> paddings_ = paddings;
  std::vector<int> dilations_ =
      dilations;  // cudnn v5 does not support dilations
  const T* filter_data = filter.data<T>();
  const GPUDNNDataLayout data_layout =
      (data_format != "NHWC" ? GPUDNNDataLayout::kNCHW
                             : GPUDNNDataLayout::kNHWC);
  std::vector<int> x_vec = vectorize<int>(x.dims());
  std::vector<int> out_vec = vectorize<int>(out->dims());
  // if channel_last, transpose to channel_first
  DenseTensor x_transpose;
  if (data_layout == GPUDNNDataLayout::kNHWC) {
    if (strides.size() == 2U) {
      std::vector<int> axis = {0, 3, 1, 2};
      for (size_t i = 0; i < axis.size(); ++i) {
        x_vec[i] = x.dims()[axis[i]];
        out_vec[i] = out->dims()[axis[i]];
      }
      x_transpose = Transpose<T, Context>(ctx, x, axis);
    } else if (strides.size() == 3U) {
      std::vector<int> axis = {0, 4, 1, 2, 3};
      for (size_t i = 0; i < axis.size(); ++i) {
        x_vec[i] = x.dims()[axis[i]];
        out_vec[i] = out->dims()[axis[i]];
      }
      x_transpose = Transpose<T, Context>(ctx, x, axis);
    }
  } else {
    x_transpose = x;
  }

  // update padding and dilation
  auto x_dims = x_transpose.dims();
  auto filter_dims = filter.dims();
  DDim x_data_dims;
  x_data_dims = slice_ddim(x_dims, 2, x_dims.size());
  DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(
      &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize);

  int data_dim = strides.size();  // 2d or 3d
  bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim);

  std::vector<int> x_pad(x_dims.size() * 2, 0);
  DenseTensor transformed_x;
  std::vector<int> padding_common(data_dim, 0);
  if (!is_sys_pad) {
    std::vector<int> padding_diff(data_dim);
    std::vector<int> new_x_shape_vec(data_dim + 2);
    new_x_shape_vec[0] = x_dims[0];
    new_x_shape_vec[1] = x_dims[1];
    for (size_t i = 0; i < data_dim; ++i) {
      padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]);
      padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]);
      new_x_shape_vec[i + 2] = x_dims[i + 2] + padding_diff[i];
      x_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i];
      x_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i];
    }
    DDim new_x_shape(make_ddim(new_x_shape_vec));
    transformed_x.Resize(new_x_shape);
    ctx.template Alloc<T>(&transformed_x);

    const int rank = x_dims.size();
    T pad_value(0.0);
    switch (rank) {
      case 4: {
        funcs::PadFunction<Context, T, 4>(
            ctx, x_pad, x_transpose, pad_value, &transformed_x);
      } break;
      case 5: {
        funcs::PadFunction<Context, T, 5>(
            ctx, x_pad, x_transpose, pad_value, &transformed_x);
      } break;
      default:
        PADDLE_THROW(errors::InvalidArgument(
            "Op(ConvTranspose) only supports 4-D or 5-D x DenseTensor."));
    }
  } else {
    transformed_x = x_transpose;
    if (paddings_.size() == data_dim) {
      for (size_t i = 0; i < data_dim; ++i) {
        padding_common[i] = paddings_[i];
      }
    } else {
      for (size_t i = 0; i < data_dim; ++i) {
        padding_common[i] = paddings_[2 * i];
      }
    }
  }

  std::vector<int64_t> starts(data_dim, 0);
  std::vector<int64_t> ends(data_dim, 0);
  std::vector<int64_t> axes(data_dim, 0);
  for (size_t i = 0; i < data_dim; ++i) {
    starts[i] = x_pad[2 * i + 4] * (strides[i] + 1);
    ends[i] = starts[i] + out_vec[i + 2];
    axes[i] = i + 2;
  }

  const T* x_data = transformed_x.data<T>();
  x_vec = vectorize<int>(transformed_x.dims());

  std::vector<int> transformed_out_vec = out_vec;
  for (size_t i = 0; i < data_dim; ++i) {
    transformed_out_vec[i + 2] =
        out_vec[i + 2] + (x_pad[2 * i + 4] + x_pad[2 * i + 5]) * strides[i] -
        2 * padding_common[i] + paddings_[2 * i] + paddings_[2 * i + 1];
  }

  DenseTensor transformed_out;
  if (!is_sys_pad) {
    transformed_out.Resize(make_ddim(transformed_out_vec));
    ctx.template Alloc<T>(&transformed_out);
  } else {
    ctx.template Alloc<T>(out);
    transformed_out.ShareDataWith(*out);
    transformed_out.Resize(make_ddim(transformed_out_vec));
  }
  T* transformed_out_data = transformed_out.data<T>();

  GPUDNNDataLayout layout;

  int iwo_groups = groups;
  int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
  iwo_groups = 1;
  c_groups = groups;
  groups = 1;
#endif

  if (strides.size() == 2U) {
    layout = GPUDNNDataLayout::kNCHW;
  } else {
    layout = GPUDNNDataLayout::kNCDHW;
  }

  size_t workspace_size = 0;
#ifdef PADDLE_WITH_HIP
  miopenConvBwdDataAlgorithm_t algo{};
#else
  cudnnConvolutionBwdDataAlgo_t algo{};
#endif
  // ------------------- cudnn conv algorithm ---------------------
  auto handle = ctx.cudnn_handle();
  auto layout_tensor = paddle::platform::GetCudnnTensorFormat(layout);
  bool deterministic = FLAGS_cudnn_deterministic;
  auto dtype = paddle::platform::CudnnDataType<T>::type;
  // ------------------- cudnn descriptors ---------------------
  paddle::operators::ConvArgs args{&transformed_out,
                                   &filter,
                                   &transformed_x,
                                   strides,
                                   padding_common,
                                   dilations_,
                                   dtype};
  args.handle = handle;
  args.idesc.set(transformed_out, iwo_groups);
  args.wdesc.set(filter, layout_tensor, iwo_groups);
  args.odesc.set(transformed_x, iwo_groups);
  args.cdesc.set(dtype,
                 padding_common,
                 strides,
                 dilations_,
                 paddle::platform::AllowTF32Cudnn(),
                 c_groups);

#ifdef PADDLE_WITH_HIP
  using search =
      paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
  workspace_size = std::max(workspace_size, search::GetWorkspaceSize(args));
  algo = search::Find<T>(args, false, deterministic, workspace_size, ctx);
#else
  using search =
      paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
  algo = search::Find<T>(args, false, deterministic, ctx);
  workspace_size =
      std::max(workspace_size, search::GetWorkspaceSize(args, algo));
#endif

  // ------------------- cudnn conv transpose forward ---------------------
  int x_offset = transformed_x.numel() / transformed_x.dims()[0] / groups;
  int out_offset =
      transformed_out.numel() / transformed_out.dims()[0] / groups;
  int filter_offset = filter.numel() / groups;
  paddle::operators::ScalingParamType<T> alpha = 1.0f;
  paddle::operators::ScalingParamType<T> beta = 0.0f;
  auto workspace_handle = ctx.cudnn_workspace_handle();
  for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
    auto cudnn_func = [&](void* cudnn_workspace) {
      PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData(
          handle,
          &alpha,
          args.odesc.desc(),
          x_data + x_offset * g,
          args.wdesc.desc(),
          filter_data + filter_offset * g,
          args.cdesc.desc(),
          algo,
          &beta,
          args.idesc.desc(),
          transformed_out_data + out_offset * g,
          cudnn_workspace,
          workspace_size));
    };
#else   // PADDLE_WITH_HIP
    auto cudnn_func = [&](void* cudnn_workspace) {
      PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData(
          handle,
          &alpha,
          args.wdesc.desc(),
          filter_data + filter_offset * g,
          args.odesc.desc(),
          x_data + x_offset * g,
          args.cdesc.desc(),
          algo,
          cudnn_workspace,
          workspace_size,
          &beta,
          args.idesc.desc(),
          transformed_out_data + out_offset * g));
    };
#endif  // PADDLE_WITH_HIP
    workspace_handle.RunFunc(cudnn_func, workspace_size);
  }

  if (!is_sys_pad && strides.size() == 2U) {
    funcs::Slice<Context, T, 4>(
        ctx, &transformed_out, out, starts, ends, axes);
  } else if (!is_sys_pad && strides.size() == 3U) {
    funcs::Slice<Context, T, 5>(
        ctx, &transformed_out, out, starts, ends, axes);
  }

  if (data_layout == GPUDNNDataLayout::kNHWC) {
    DenseTensor out_transpose;
    DenseTensor out_nchw;
    out_nchw.ShareDataWith(*out);
    out_nchw.Resize(make_ddim(out_vec));
    if (strides.size() == 2U) {
      out_transpose = Transpose<T, Context>(ctx, out_nchw, {0, 2, 3, 1});
    } else if (strides.size() == 3U) {
      out_transpose = Transpose<T, Context>(ctx, out_nchw, {0, 2, 3, 4, 1});
    }
    *out = out_transpose;
  }
}

template <typename T, typename Context>
void Conv2dTransposeGPUDNNKernel(const Context& ctx,
                                 const DenseTensor& x,
                                 const DenseTensor& filter,
                                 const std::vector<int>& strides,
                                 const std::vector<int>& paddings,
                                 const std::vector<int>& output_padding,
                                 const std::vector<int>& output_size,
                                 const std::string& padding_algorithm,
                                 int groups,
                                 const std::vector<int>& dilations,
                                 const std::string& data_format,
                                 DenseTensor* out) {
  ConvTransposeRawGPUDNNKernel<T, Context>(ctx,
                                           x,
                                           filter,
                                           strides,
                                           paddings,
                                           padding_algorithm,
                                           groups,
                                           dilations,
                                           data_format,
                                           out);
}

template <typename T, typename Context>
void Conv3dTransposeGPUDNNKernel(const Context& ctx,
                                 const DenseTensor& x,
                                 const DenseTensor& filter,
                                 const std::vector<int>& strides,
                                 const std::vector<int>& paddings,
                                 const std::vector<int>& output_padding,
                                 const std::vector<int>& output_size,
                                 const std::string& padding_algorithm,
                                 int groups,
                                 const std::vector<int>& dilations,
                                 const std::string& data_format,
                                 DenseTensor* out) {
  ConvTransposeRawGPUDNNKernel<T, Context>(ctx,
                                           x,
                                           filter,
                                           strides,
                                           paddings,
                                           padding_algorithm,
                                           groups,
                                           dilations,
                                           data_format,
                                           out);
}

}  // namespace phi
using float16 = phi::dtype::float16;

#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
PD_REGISTER_KERNEL(conv2d_transpose,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeGPUDNNKernel,
                   float,
                   float16) {}
PD_REGISTER_KERNEL(conv3d_transpose,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3dTransposeGPUDNNKernel,
                   float,
                   float16) {}
#else
PD_REGISTER_KERNEL(conv2d_transpose,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv2dTransposeGPUDNNKernel,
                   float,
                   double,
                   float16) {}
PD_REGISTER_KERNEL(conv3d_transpose,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3dTransposeGPUDNNKernel,
                   float,
                   double,
                   float16) {}
#endif
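
A quick sanity check on the shape arithmetic used throughout the kernel above: with symmetric padding, the spatial extent of a transposed convolution is the standard inversion of the convolution output formula. A minimal standalone sketch (the helper name and sample numbers are illustrative only, not part of the patch):

#include <cstdio>

// Transposed-convolution output extent: the inverse of
//   out = (in + 2 * pad - dilation * (k - 1) - 1) / stride + 1.
int ConvTransposeOutSize(int in, int k, int stride, int pad, int dilation) {
  return (in - 1) * stride - 2 * pad + dilation * (k - 1) + 1;
}

int main() {
  // e.g. a 4-wide input, 3-wide kernel, stride 2, pad 1 -> 7-wide output
  std::printf("%d\n", ConvTransposeOutSize(4, 3, 2, 1, 1));  // prints 7
  return 0;
}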
paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h
0 → 100644
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/conv_transpose_grad_kernel.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
#include "paddle/phi/kernels/funcs/slice.h"
namespace phi {

template <typename T, typename Context>
void ConvTransposeGradRawKernel(const Context& ctx,
                                const DenseTensor& x,
                                const DenseTensor& filter,
                                const DenseTensor& dout,
                                const std::vector<int>& strides,
                                const std::vector<int>& paddings,
                                const std::string& padding_algorithm,
                                int groups,
                                const std::vector<int>& dilations,
                                const std::string& data_format,
                                DenseTensor* dx,
                                DenseTensor* dfilter) {
  const DataLayout data_layout =
      paddle::framework::StringToDataLayout(data_format);
  // For filter, we do not use const pointer because we will do reshape,
  // but we should avoid modifying its value.
  DenseTensor filter_ = filter;

  if ((!dx) && (!dfilter)) {
    return;
  }

  std::vector<int> paddings_ = paddings;
  std::vector<int> dilations_ = dilations;

  auto x_dims = x.dims();
  auto filter_dims = filter_.dims();
  auto dout_dims = dout.dims();
  const int batch_size = static_cast<int>(x.dims()[0]);

  DDim in_data_dims;
  if (data_layout != DataLayout::kNHWC) {
    in_data_dims = slice_ddim(x_dims, 2, x_dims.size());
  } else {
    in_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
  }
  DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(
      &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize);

  // x_shape_vec: {n, c, h, w} or {n, c, d, h, w} for channel_first
  // x_shape_vec: {n, h, w, c} or {n, d, h, w, c} for channel_last
  std::vector<int64_t> x_shape_vec = vectorize(x.dims());
  // filter_shape_vec: {i_c, o_c, k_h, k_w} or {i_c, o_c, k_d, k_h, k_w}
  std::vector<int64_t> filter_shape_vec = vectorize(filter_.dims());

  // use col_shape in the im2col and col2im (or vol2col and col2vol)
  // calculation
  // col_shape_vec: {o_c, k_h, k_w, h, w} or {o_c, k_d, k_h, k_w, d, h, w}
  size_t data_dim = filter_shape_vec.size() - 2;
  std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
  if (data_layout != DataLayout::kNHWC) {
    col_shape_vec[0] = dout_dims[1];
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = x_shape_vec[j + 2];
    }
  } else {
    col_shape_vec[0] = dout_dims[dout_dims.size() - 1];
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = x_shape_vec[j + 1];
    }
  }
  DDim col_shape(make_ddim(col_shape_vec));

  // use col_matrix_shape in the gemm calculation
  // size: (o_c * k_h * k_w, h * w) or (o_c * k_d * k_h * k_w, d * h * w)
  DDim col_matrix_shape = flatten_to_2d(col_shape, data_dim + 1);

  // output size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first
  // output size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last
  DDim output_shape = slice_ddim(dout.dims(), 1, dout.dims().size());

  // x matrix size: (i_c, h * w) or (i_c, d * h * w) for channel_first
  // x matrix size: (h * w, i_c) or (d * h * w, i_c) for channel_last
  DDim x_matrix_shape;
  if (data_layout != DataLayout::kNHWC) {
    x_matrix_shape = {x_dims[1], col_matrix_shape[1]};
  } else {
    x_matrix_shape = {col_matrix_shape[1], x_dims[x_dims.size() - 1]};
  }

  // filter size: (i_c, o_c/g * k_h * k_w) or (i_c, o_c/g * k_d * k_h * k_w)
  DDim filter_matrix_shape;
  if (data_layout != DataLayout::kNHWC) {
    filter_matrix_shape = {x_dims[1], col_matrix_shape[0] / groups};
  } else {
    filter_matrix_shape = {x_dims[x_dims.size() - 1],
                           col_matrix_shape[0] / groups};
  }
  filter_.Resize(filter_matrix_shape);

  int in_step = (data_layout != DataLayout::kNHWC
                     ? static_cast<int>(x_dims[1]) / groups
                     : static_cast<int>(x_dims[x_dims.size() - 1]) / groups);
  int col_step = static_cast<int>(col_matrix_shape[0]) / groups;

  // convolution transpose grad on x:
  // im2col + gemm (similar to conv-forward)
  // x needs to compute gradient
  auto blas = funcs::GetBlas<Context, T>(ctx);
  if (dx || dfilter) {
    DenseTensor col;
    col.Resize(col_shape);
    ctx.template Alloc<T>(&col);
    // col_matrix shares the same piece of data with col,
    // but will be reshaped into a two-dimensional matrix shape
    // to call the matrix multiplication interface.
    DenseTensor col_matrix;
    col_matrix.ShareDataWith(col);
    col_matrix.Resize(col_matrix_shape);

    DenseTensor dfilter_;
    funcs::SetConstant<Context, T> set_zero;

    paddle::operators::math::
        Im2ColFunctor<paddle::operators::math::ColFormat::kCFO, Context, T>
            im2col;
    paddle::operators::math::Vol2ColFunctor<Context, T> vol2col;
    funcs::ConcatFunctor<Context, T> concat_functor;

    if (dx) {
      ctx.template Alloc<T>(dx);
      set_zero(ctx, dx, static_cast<T>(0));
    }
    if (dfilter) {
      // dfilter_ size (i_c, o_c/g, k_h, k_w)
      ctx.template Alloc<T>(dfilter);
      set_zero(ctx, dfilter, static_cast<T>(0));
      dfilter_ = *dfilter;
      dfilter_.Resize(filter_matrix_shape);
    }

    size_t D = x.dims().size();

    for (int i = 0; i < batch_size; i++) {
      // batch with size (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for
      // channel_first
      // batch with size (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for
      // channel_last
      DenseTensor dout_batch = dout.Slice(i, i + 1).Resize(output_shape);

      if (data_dim == 2U) {
        // im2col: dy -> col matrix
        // from (o_c, o_h, o_w) to (o_c * k_h * k_w, i_h * i_w) for
        // channel_first
        // from (o_h, o_w, o_c) to (o_c * k_h * k_w, i_h * i_w) for
        // channel_last
        im2col(ctx,
               dout_batch,
               dilations_,
               strides,
               std::vector<int>{
                   paddings_[0], paddings_[2], paddings_[1], paddings_[3]},
               &col,
               data_layout);
      } else if (data_dim == 3U) {
        // vol2col: dy -> col_matrix
        // from (o_c, o_d, o_h, o_w) to (o_c * k_d * k_h * k_w, i_d * i_h *
        // i_w) for channel_first
        // from (o_d, o_h, o_w, o_c) to (i_d * i_h * i_w, o_c * k_d * k_h *
        // k_w) for channel_last
        vol2col(
            ctx, dout_batch, dilations_, strides, paddings_, &col, data_layout);
      }

      if (dx) {
        // batch with size (i_c, i_h, i_w) or (i_h, i_w, i_c)
        DenseTensor dx_batch = dx->Slice(i, i + 1).Resize(x_matrix_shape);

        // gemm: dx = filter * dy
        // (i_c, o_c * k_h * k_w) * (o_c * k_h * k_w, i_h * i_w) -> (i_c, i_h
        // * i_w)
        // or
        // (i_c, o_c * k_d * k_h * k_w) * (o_c * k_d * k_h * k_w, i_d * i_h *
        // i_w) -> (i_c, i_d, i_h, i_w)
        // gemm: dx = dy^T * filter^T for channel_last
        std::vector<DenseTensor> dx_batch_vec;
        for (int g = 0; g < groups; g++) {
          // dx_slice: (i_c/g, i_h * i_w) or (i_c/g, i_d * i_h * i_w)
          // for channel_first
          // dx_slice: (i_h * i_w, i_c/g) or (i_d * i_h * i_w, i_c/g)
          // for channel_last
          // filter_slice: (i_c/g, o_c/g * k_h * k_w)
          DenseTensor filter_slice =
              filter_.Slice(g * in_step, (g + 1) * in_step);
          // col_matrix_slice: (o_c/g * k_h * k_w, h * w) or (o_c/g * k_d *
          // k_h * k_w, d * h * w)
          DenseTensor col_matrix_slice =
              col_matrix.Slice(g * col_step, (g + 1) * col_step);
          if (data_layout != DataLayout::kNHWC) {
            DenseTensor dx_slice =
                dx_batch.Slice(g * in_step, (g + 1) * in_step);
            blas.MatMul(filter_slice,
                        false,
                        col_matrix_slice,
                        false,
                        static_cast<T>(1.0),
                        &dx_slice,
                        static_cast<T>(0.0));
          } else {
            DenseTensor dx_slice;
            funcs::Slice<Context, T, 2>(ctx,
                                        &dx_batch,
                                        &dx_slice,
                                        g * in_step,
                                        (g + 1) * in_step,
                                        1);
            blas.MatMul(col_matrix_slice,
                        true,
                        filter_slice,
                        true,
                        static_cast<T>(1.0),
                        &dx_slice,
                        static_cast<T>(0.0));
            DDim dx_slice_shape;
            if (data_dim == 2U) {
              dx_slice_shape = {x_dims[1], x_dims[2], in_step};
            } else {
              dx_slice_shape = {x_dims[1], x_dims[2], x_dims[3], in_step};
            }
            dx_slice = dx_slice.Resize(dx_slice_shape);
            dx_batch_vec.push_back(dx_slice);
          }
        }
        if (data_layout == DataLayout::kNHWC) {
          concat_functor(
              ctx, dx_batch_vec, static_cast<int>(D - 2), &dx_batch);
        }
      }
      if (dfilter) {
        // x batch: (i_c, i_h * i_w) or (i_h * i_w, i_c)
        DenseTensor in_batch = x.Slice(i, i + 1).Resize(x_matrix_shape);
        // gemm: d_filter = x * dy^T
        // (i_c, i_h * i_w) * (i_h * i_w, o_c * k_h * k_w) -> (i_c, o_c * k_h
        // * k_w)
        // or
        // (i_c, i_d * i_h * i_w) * (i_d * i_h * i_w, o_c * k_d * k_h * k_w)
        // -> (i_c, o_c * k_d * k_h * k_w)
        // gemm: d_filter = x^T * dy^T for channel_last
        for (int g = 0; g < groups; g++) {
          DenseTensor dfilter_slice =
              dfilter_.Slice(g * in_step, (g + 1) * in_step);
          DenseTensor col_matrix_slice =
              col_matrix.Slice(g * col_step, (g + 1) * col_step);
          if (data_layout != DataLayout::kNHWC) {
            DenseTensor in_batch_slice =
                in_batch.Slice(g * in_step, (g + 1) * in_step);
            blas.MatMul(in_batch_slice,
                        false,
                        col_matrix_slice,
                        true,
                        static_cast<T>(1.0),
                        &dfilter_slice,
                        static_cast<T>(1.0));
          } else {
            DenseTensor in_batch_slice;
            funcs::Slice<Context, T, 2>(ctx,
                                        &in_batch,
                                        &in_batch_slice,
                                        g * in_step,
                                        (g + 1) * in_step,
                                        1);
            blas.MatMul(in_batch_slice,
                        true,
                        col_matrix_slice,
                        true,
                        static_cast<T>(1.0),
                        &dfilter_slice,
                        static_cast<T>(1.0));
          }
        }
      }
    }
  }
}

template <typename T, typename Context>
void Conv2dTransposeGradKernel(const Context& ctx,
                               const DenseTensor& x,
                               const DenseTensor& filter,
                               const DenseTensor& dout,
                               const std::vector<int>& strides,
                               const std::vector<int>& paddings,
                               const std::vector<int>& output_padding,
                               const std::vector<int>& output_size,
                               const std::string& padding_algorithm,
                               int groups,
                               const std::vector<int>& dilations,
                               const std::string& data_format,
                               DenseTensor* dx,
                               DenseTensor* dfilter) {
  ConvTransposeGradRawKernel<T, Context>(ctx,
                                         x,
                                         filter,
                                         dout,
                                         strides,
                                         paddings,
                                         padding_algorithm,
                                         groups,
                                         dilations,
                                         data_format,
                                         dx,
                                         dfilter);
}

template <typename T, typename Context>
void Conv3dTransposeGradKernel(const Context& ctx,
                               const DenseTensor& x,
                               const DenseTensor& filter,
                               const DenseTensor& dout,
                               const std::vector<int>& strides,
                               const std::vector<int>& paddings,
                               const std::vector<int>& output_padding,
                               const std::vector<int>& output_size,
                               const std::string& padding_algorithm,
                               int groups,
                               const std::vector<int>& dilations,
                               const std::string& data_format,
                               DenseTensor* dx,
                               DenseTensor* dfilter) {
  ConvTransposeGradRawKernel<T, Context>(ctx,
                                         x,
                                         filter,
                                         dout,
                                         strides,
                                         paddings,
                                         padding_algorithm,
                                         groups,
                                         dilations,
                                         data_format,
                                         dx,
                                         dfilter);
}

}  // namespace phi
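
The "im2col + gemm (similar to conv-forward)" comment above is the key identity: the input gradient of a transposed convolution is an ordinary (valid) correlation of dout with the filter. A minimal 1-D, single-channel sketch of that identity, assuming stride 1 and no padding (all names and numbers are illustrative, not part of the patch):

#include <cstdio>
#include <vector>

int main() {
  const std::vector<double> x = {1, 2, 3};     // input, length 3
  const std::vector<double> w = {0.5, -1, 2};  // filter, length 3
  // Forward conv-transpose output has length |x| + |w| - 1 = 5;
  // take an all-ones upstream gradient for simplicity.
  const std::vector<double> dout(x.size() + w.size() - 1, 1.0);

  // dx[i] = sum_j dout[i + j] * w[j] -- a valid correlation, which is what
  // the batched, grouped im2col + gemm code above computes.
  std::vector<double> dx(x.size(), 0.0);
  for (size_t i = 0; i < x.size(); ++i)
    for (size_t j = 0; j < w.size(); ++j)
      dx[i] += dout[i + j] * w[j];

  for (double v : dx) std::printf("%g ", v);  // 1.5 1.5 1.5 (= sum of w)
  std::printf("\n");
  return 0;
}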
paddle/phi/kernels/impl/conv_transpose_kernel_impl.h
0 → 100644
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/conv_transpose_kernel.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
#include "paddle/phi/kernels/funcs/slice.h"
namespace phi {

template <typename T, typename Context>
void ConvTransposeRawKernel(const Context& ctx,
                            const DenseTensor& x,
                            const DenseTensor& filter,
                            const std::vector<int>& strides,
                            const std::vector<int>& paddings,
                            const std::string& padding_algorithm,
                            int groups,
                            const std::vector<int>& dilations,
                            const std::string& data_format,
                            DenseTensor* out) {
  const DataLayout data_layout =
      paddle::framework::StringToDataLayout(data_format);
  // The filter will be reshaped, so it should not be constant
  DenseTensor filter_ = filter;
  std::vector<int> paddings_ = paddings;
  std::vector<int> dilations_ = dilations;

  auto x_dims = x.dims();
  auto filter_dims = filter_.dims();
  auto out_dims = out->dims();
  const int batch_size = static_cast<int>(x.dims()[0]);

  DDim in_data_dims;
  if (data_layout != DataLayout::kNHWC) {
    in_data_dims = slice_ddim(x_dims, 2, x_dims.size());
  } else {
    in_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
  }
  DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(
      &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize);

  // x_shape_vec: {n, c, h, w} or {n, c, d, h, w} for channel_first
  // x_shape_vec: {n, h, w, c} or {n, d, h, w, c} for channel_last
  std::vector<int64_t> x_shape_vec = vectorize(x.dims());
  // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w}
  std::vector<int64_t> filter_shape_vec = vectorize(filter_.dims());

  // use col_shape in the im2col and col2im (or vol2col and col2vol)
  // calculation
  // col_shape_vec: {o_c/g, k_h, k_w, h, w} or {o_c/g, k_d, k_h, k_w, d, h, w}
  size_t data_dim = filter_shape_vec.size() - 2;
  std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
  if (data_layout != DataLayout::kNHWC) {
    col_shape_vec[0] = out_dims[1] / groups;
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = x_shape_vec[j + 2];
    }
  } else {
    col_shape_vec[0] = out_dims[out_dims.size() - 1] / groups;
    for (size_t j = 0; j < data_dim; ++j) {
      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
      col_shape_vec[j + 1 + data_dim] = x_shape_vec[j + 1];
    }
  }
  DDim col_shape(make_ddim(col_shape_vec));

  // use col_matrix_shape in the gemm calculation
  // size: (o_c/g * k_h * k_w, h * w) or (o_c/g * k_d * k_h * k_w, d * h * w)
  DDim col_matrix_shape = flatten_to_2d(col_shape, data_dim + 1);

  DenseTensor col;
  col.Resize(col_shape);
  ctx.template Alloc<T>(&col);
  // col_matrix shares the same piece of data with col,
  // but will be reshaped into a two-dimensional matrix shape
  // to call the matrix multiplication interface.
  DenseTensor col_matrix;
  col_matrix.ShareDataWith(col);
  col_matrix.Resize(col_matrix_shape);

  // out size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first
  // out size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last
  DDim out_shape = slice_ddim(out->dims(), 1, out->dims().size());

  // x matrix size: (i_c, h * w) or (i_c, d * h * w) for channel_first
  // x matrix size: (h * w, i_c) or (d * h * w, i_c) for channel_last
  DDim x_matrix_shape;
  if (data_layout != DataLayout::kNHWC) {
    x_matrix_shape = {x_dims[1], col_matrix_shape[1]};
  } else {
    x_matrix_shape = {col_matrix_shape[1], x_dims[x_dims.size() - 1]};
  }

  // filter size: (i_c, o_c/g * k_h * k_w) or (i_c, o_c/g * k_d * k_h * k_w)
  DDim filter_matrix_shape;
  if (data_layout != DataLayout::kNHWC) {
    filter_matrix_shape = {x_dims[1], col_matrix_shape[0]};
  } else {
    filter_matrix_shape = {x_dims[x_dims.size() - 1], col_matrix_shape[0]};
  }
  filter_.Resize(filter_matrix_shape);

  ctx.template Alloc<T>(out);

  funcs::SetConstant<Context, T> set_zero;
  auto blas = funcs::GetBlas<Context, T>(ctx);
  set_zero(ctx, out, static_cast<T>(0));

  int in_step = (data_layout != DataLayout::kNHWC
                     ? static_cast<int>(x_dims[1]) / groups
                     : static_cast<int>(x_dims[x_dims.size() - 1]) / groups);

  int out_step =
      (data_layout != DataLayout::kNHWC
           ? static_cast<int>(out_dims[1]) / groups
           : static_cast<int>(out_dims[out_dims.size() - 1]) / groups);
  paddle::operators::math::
      Col2ImFunctor<paddle::operators::math::ColFormat::kCFO, Context, T>
          col2im;
  paddle::operators::math::Col2VolFunctor<Context, T> col2vol;
  funcs::ConcatFunctor<Context, T> concat_functor;

  // convolution transpose: gemm + col2im or col2vol (similar to conv-backward
  // on x)
  size_t D = x.dims().size();
  for (int i = 0; i < batch_size; i++) {
    // batch with size (i_c, h * w) or (i_c, d * h * w) for channel_first
    // batch with size (h * w, i_c) or (d * h * w, i_c) for channel_last
    DenseTensor x_batch = x.Slice(i, i + 1).Resize(x_matrix_shape);

    // out size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first
    // out size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last
    DenseTensor out_batch = out->Slice(i, i + 1).Resize(out_shape);

    std::vector<DenseTensor> out_batch_vec;
    for (int g = 0; g < groups; g++) {
      int64_t start = g * in_step;
      int64_t end = (g + 1) * in_step;
      int axes = (data_layout != DataLayout::kNHWC ? 0 : 1);
      DenseTensor filter_slice = filter_.Slice(g * in_step, (g + 1) * in_step);
      DenseTensor in_slice, out_slice;

      // col_matrix = filter_slice * x_slice
      // of shape (o_c/g * k_h * k_w, h * w)
      // or (o_c/g * k_d * k_h * k_w, d * h * w)
      if (data_layout != DataLayout::kNHWC) {
        in_slice = x_batch.Slice(g * in_step, (g + 1) * in_step);
        out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
        blas.MatMul(filter_slice,
                    true,
                    in_slice,
                    false,
                    static_cast<T>(1.0),
                    &col_matrix,
                    static_cast<T>(0.0));
      } else {
        funcs::Slice<Context, T, 2>(ctx, &x_batch, &in_slice, start, end, axes);
        start = g * out_step;
        end = (g + 1) * out_step;
        axes = D - 2;
        if (D == 4U) {
          funcs::Slice<Context, T, 3>(
              ctx, &out_batch, &out_slice, start, end, axes);
        } else if (D == 5U) {
          funcs::Slice<Context, T, 4>(
              ctx, &out_batch, &out_slice, start, end, axes);
        }
        blas.MatMul(filter_slice,
                    true,
                    in_slice,
                    true,
                    static_cast<T>(1.0),
                    &col_matrix,
                    static_cast<T>(0.0));
      }

      if (data_dim == 2U) {
        // col2im: col_matrix -> dy from (o_c/g * k_h * k_w, h * w) to (o_c/g,
        // o_h, o_w) or (o_h, o_w, o_c/g)
        col2im(ctx,
               col,
               dilations_,
               strides,
               std::vector<int>{
                   paddings_[0], paddings_[2], paddings_[1], paddings_[3]},
               &out_slice,
               data_layout);
      } else if (data_dim == 3U) {
        // col2vol: col_matrix -> dy from (o_c/g * k_d * k_h * k_w, d * h * w)
        // to (o_c/g, o_d, o_h, o_w) or (o_d, o_h, o_w, o_c/g)
        col2vol(
            ctx, col, dilations_, strides, paddings_, &out_slice, data_layout);
      }
      if (data_layout == DataLayout::kNHWC) {
        out_batch_vec.push_back(out_slice);
      }
    }
    if (data_layout == DataLayout::kNHWC) {
      concat_functor(ctx, out_batch_vec, static_cast<int>(D - 2), &out_batch);
    }
  }
}

template <typename T, typename Context>
void Conv2dTransposeKernel(const Context& ctx,
                           const DenseTensor& x,
                           const DenseTensor& filter,
                           const std::vector<int>& strides,
                           const std::vector<int>& paddings,
                           const std::vector<int>& output_padding,
                           const std::vector<int>& output_size,
                           const std::string& padding_algorithm,
                           int groups,
                           const std::vector<int>& dilations,
                           const std::string& data_format,
                           DenseTensor* out) {
  ConvTransposeRawKernel<T, Context>(ctx,
                                     x,
                                     filter,
                                     strides,
                                     paddings,
                                     padding_algorithm,
                                     groups,
                                     dilations,
                                     data_format,
                                     out);
}

template <typename T, typename Context>
void Conv3dTransposeKernel(const Context& ctx,
                           const DenseTensor& x,
                           const DenseTensor& filter,
                           const std::vector<int>& strides,
                           const std::vector<int>& paddings,
                           const std::vector<int>& output_padding,
                           const std::vector<int>& output_size,
                           const std::string& padding_algorithm,
                           int groups,
                           const std::vector<int>& dilations,
                           const std::string& data_format,
                           DenseTensor* out) {
  ConvTransposeRawKernel<T, Context>(ctx,
                                     x,
                                     filter,
                                     strides,
                                     paddings,
                                     padding_algorithm,
                                     groups,
                                     dilations,
                                     data_format,
                                     out);
}

}  // namespace phi
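
The gemm + col2im/col2vol pipeline above is an optimized, batched form of the scatter definition of transposed convolution. A naive single-channel 2-D reference (no groups, no dilation; purely an illustrative cross-check under those assumptions, not part of the patch):

#include <cstdio>
#include <vector>

// Scatter form: each input value is multiplied through the kernel and
// accumulated into the output window it maps to.
void NaiveConvTranspose2d(const std::vector<double>& x, int ih, int iw,
                          const std::vector<double>& w, int kh, int kw,
                          int stride, int pad, std::vector<double>* out,
                          int* oh, int* ow) {
  *oh = (ih - 1) * stride - 2 * pad + kh;
  *ow = (iw - 1) * stride - 2 * pad + kw;
  out->assign(static_cast<size_t>(*oh) * *ow, 0.0);
  for (int i = 0; i < ih; ++i)
    for (int j = 0; j < iw; ++j)
      for (int p = 0; p < kh; ++p)
        for (int q = 0; q < kw; ++q) {
          int oi = i * stride + p - pad;
          int oj = j * stride + q - pad;
          if (oi >= 0 && oi < *oh && oj >= 0 && oj < *ow)
            (*out)[oi * *ow + oj] += x[i * iw + j] * w[p * kw + q];
        }
}

int main() {
  const std::vector<double> x = {1, 2, 3, 4};  // 2x2 input
  const std::vector<double> w(9, 1.0);         // 3x3 all-ones kernel
  std::vector<double> out;
  int oh = 0, ow = 0;
  NaiveConvTranspose2d(x, 2, 2, w, 3, 3, /*stride=*/1, /*pad=*/0,
                       &out, &oh, &ow);
  for (int i = 0; i < oh; ++i) {
    for (int j = 0; j < ow; ++j) std::printf("%4g", out[i * ow + j]);
    std::printf("\n");
  }  // 4x4 output whose entries sum to 9 * (1 + 2 + 3 + 4) = 90
  return 0;
}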
paddle/phi/ops/compat/conv_transpose_sig.cc
0 → 100644
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {

KernelSignature Conv2dTransposeOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("conv2d_transpose",
                         {"Input", "Filter"},
                         {"strides",
                          "paddings",
                          "output_padding",
                          "output_size",
                          "padding_algorithm",
                          "groups",
                          "dilations",
                          "data_format"},
                         {"Output"});
}

KernelSignature Conv2dTransposeGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("conv2d_transpose_grad",
                         {"Input", "Filter", GradVarName("Output")},
                         {"strides",
                          "paddings",
                          "output_padding",
                          "output_size",
                          "padding_algorithm",
                          "groups",
                          "dilations",
                          "data_format"},
                         {GradVarName("Input"), GradVarName("Filter")});
}

KernelSignature Conv2dTransposeDoubleGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("conv2d_transpose_grad_grad",
                         {"Input", "Filter", "DOutput", "DDInput", "DDFilter"},
                         {"strides",
                          "paddings",
                          "output_padding",
                          "output_size",
                          "padding_algorithm",
                          "groups",
                          "dilations",
                          "data_format"},
                         {"DInput", "DFilter", "DDOutput"});
}

KernelSignature Conv3dTransposeOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("conv3d_transpose",
                         {"Input", "Filter"},
                         {"strides",
                          "paddings",
                          "output_padding",
                          "output_size",
                          "padding_algorithm",
                          "groups",
                          "dilations",
                          "data_format"},
                         {"Output"});
}

KernelSignature Conv3dTransposeGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("conv3d_transpose_grad",
                         {"Input", "Filter", GradVarName("Output")},
                         {"strides",
                          "paddings",
                          "output_padding",
                          "output_size",
                          "padding_algorithm",
                          "groups",
                          "dilations",
                          "data_format"},
                         {GradVarName("Input"), GradVarName("Filter")});
}

KernelSignature DepthwiseConv2dTransposeOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("depthwise_conv2d_transpose",
                         {"Input", "Filter"},
                         {"strides",
                          "paddings",
                          "output_padding",
                          "output_size",
                          "padding_algorithm",
                          "groups",
                          "dilations",
                          "data_format"},
                         {"Output"});
}

KernelSignature DepthwiseConv2dTransposeGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("depthwise_conv2d_transpose_grad",
                         {"Input", "Filter", GradVarName("Output")},
                         {"strides",
                          "paddings",
                          "output_padding",
                          "output_size",
                          "padding_algorithm",
                          "groups",
                          "dilations",
                          "data_format"},
                         {GradVarName("Input"), GradVarName("Filter")});
}

}  // namespace phi

PD_REGISTER_ARG_MAPPING_FN(conv2d_transpose,
                           phi::Conv2dTransposeOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(conv2d_transpose_grad,
                           phi::Conv2dTransposeGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(conv2d_transpose_grad_grad,
                           phi::Conv2dTransposeDoubleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(conv3d_transpose,
                           phi::Conv3dTransposeOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(conv3d_transpose_grad,
                           phi::Conv3dTransposeGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(depthwise_conv2d_transpose,
                           phi::DepthwiseConv2dTransposeOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(depthwise_conv2d_transpose_grad,
                           phi::DepthwiseConv2dTransposeGradOpArgumentMapping);
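
These mapping functions route the legacy fluid operator's named inputs, attributes, and outputs onto the new phi kernel signatures, so the existing operator definitions keep working after the kernels move. As a minimal illustration of the pattern only (a made-up op; "my_op", "X", "axis", and "Out" are invented names, not part of this patch):

// Hypothetical mapping following the same pattern as the functions above:
// inputs first, then attributes, then outputs.
KernelSignature MyOpArgumentMapping(const ArgumentMappingContext& ctx) {
  return KernelSignature("my_op", {"X"}, {"axis"}, {"Out"});
}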