BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit d7f422c9
Authored Sep 24, 2020 by Kaipeng Deng; committed by GitHub on Sep 24, 2020
Parent: 59c04999

fix error message in conv/conv_transpose. test=develop (#27464)

* fix error message in conv/conv_transpose. test=develop
Showing 4 changed files with 70 additions and 40 deletions (+70 -40)
paddle/fluid/operators/conv_cudnn_op.cu            +36 -24
paddle/fluid/operators/conv_op.h                   +14 -4
paddle/fluid/operators/conv_transpose_cudnn_op.cu  +10 -10
paddle/fluid/operators/conv_transpose_op.h         +10 -2
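All four files apply the same convention: checks that previously threw bare strings via PADDLE_THROW / PADDLE_ENFORCE now pass a typed error from platform::errors, PreconditionNotMet for device-place checks and InvalidArgument for bad attributes or shapes. The sketch below only restates the calls visible in the diffs that follow; the helper function and its arguments are made up for illustration, it assumes the enforce header provides the macros and error builders, and it compiles only inside the Paddle source tree.

    // Sketch of the error-reporting convention used throughout this commit.
    // CheckConvPreconditions is a hypothetical helper, not part of the change.
    #include "paddle/fluid/platform/enforce.h"

    void CheckConvPreconditions(bool on_gpu, int rank) {
      // Old style: PADDLE_ENFORCE_EQ(on_gpu, true, "It must use CUDAPlace.");
      PADDLE_ENFORCE_EQ(
          on_gpu, true,
          paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
      // Old style: PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
      PADDLE_ENFORCE_EQ(rank == 4 || rank == 5, true,
                        paddle::platform::errors::InvalidArgument(
                            "ConvOp only support tensors with 4 or 5 dimensions, "
                            "but received a tensor with rank %d.", rank));
    }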
paddle/fluid/operators/conv_cudnn_op.cu
@@ -50,8 +50,9 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     const Tensor* input = ctx.Input<Tensor>("Input");
     auto* filter = ctx.Input<Tensor>("Filter");
     auto* output = ctx.Output<Tensor>("Output");
@@ -60,14 +61,16 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
     int groups = ctx.Attr<int>("groups");
     bool exhaustive_search =
         FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
-    if (exhaustive_search && FLAGS_cudnn_deterministic) {
-      PADDLE_THROW(
-          "Cann't set exhaustive_search True and "
-          "FLAGS_cudnn_deterministic True at same time.");
-    }
+    bool deterministic = FLAGS_cudnn_deterministic;
+    auto exhaustive_deterministic = exhaustive_search && deterministic;
+    PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
+                      platform::errors::InvalidArgument(
+                          "Cann't set exhaustive_search True and "
+                          "FLAGS_cudnn_deterministic True at same time."));
     const std::string padding_algorithm =
         ctx.Attr<std::string>("padding_algorithm");
     const std::string data_format = ctx.Attr<std::string>("data_format");
@@ -197,7 +200,8 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
               &transformed_input);
         } break;
         default:
-          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "ConvOp only support tensors with 4 or 5 dimensions."));
       }
     } else {
@@ -317,8 +321,9 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto input = ctx.Input<Tensor>("Input");
     auto filter = ctx.Input<Tensor>("Filter");
     auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
@@ -337,14 +342,16 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
     int groups = ctx.Attr<int>("groups");
     bool exhaustive_search =
         FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
     bool deterministic = FLAGS_cudnn_deterministic;
-    if (exhaustive_search && deterministic) {
-      PADDLE_THROW(
-          "Can't set exhaustive_search True and "
-          "FLAGS_cudnn_deterministic True at same time.");
-    }
+    auto exhaustive_deterministic = exhaustive_search && deterministic;
+    PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
+                      platform::errors::InvalidArgument(
+                          "Cann't set exhaustive_search True and "
+                          "FLAGS_cudnn_deterministic True at same time."));
     const std::string data_format = ctx.Attr<std::string>("data_format");
     const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
@@ -495,7 +502,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
               &transformed_input);
         } break;
         default:
-          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "ConvOp only support tensors with 4 or 5 dimensions."));
       }
     } else {
       transformed_input.ShareDataWith(transformed_input_channel);
@@ -701,8 +709,9 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto X = ctx.Input<Tensor>("Input");
     auto W = ctx.Input<Tensor>("Filter");
     auto dO = ctx.Input<Tensor>("DOutput");
@@ -736,14 +745,16 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
     const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
     int groups = ctx.Attr<int>("groups");
     bool exhaustive_search =
         FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
     bool deterministic = FLAGS_cudnn_deterministic;
-    if (exhaustive_search && deterministic) {
-      PADDLE_THROW(
-          "Can't set exhaustive_search True and "
-          "FLAGS_cudnn_deterministic True at same time.");
-    }
+    auto exhaustive_deterministic = exhaustive_search && deterministic;
+    PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
+                      platform::errors::InvalidArgument(
+                          "Cann't set exhaustive_search True and "
+                          "FLAGS_cudnn_deterministic True at same time."));
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
@@ -878,7 +889,8 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
           }
         } break;
         default:
-          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "ConvOp only support tensors with 4 or 5 dimensions."));
       }
     } else {
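In the three cuDNN kernels above, the guard that forbids enabling exhaustive search together with FLAGS_cudnn_deterministic is rewritten from an if plus PADDLE_THROW into a single PADDLE_ENFORCE_EQ on the conjunction; both forms reject exactly the configurations where both flags are true, only the reporting changes. A compact sketch of that equivalence, with hypothetical flag values and the Paddle macros assumed available:

    // Both guards fail only when exhaustive_search and deterministic are both true.
    bool exhaustive_search = true;  // e.g. FLAGS_cudnn_exhaustive_search or the op attribute
    bool deterministic = true;      // e.g. FLAGS_cudnn_deterministic

    // Before: if (exhaustive_search && deterministic) PADDLE_THROW("...");
    // After: enforce that the conjunction is false and attach a typed error.
    auto exhaustive_deterministic = exhaustive_search && deterministic;
    PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
                      platform::errors::InvalidArgument(
                          "Cann't set exhaustive_search True and "
                          "FLAGS_cudnn_deterministic True at same time."));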
paddle/fluid/operators/conv_op.h
@@ -685,8 +685,9 @@ class GemmConvDoubleGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto& dev_ctx = ctx.template device_context<platform::CPUDeviceContext>();
-    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
-                      "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CPUPlace."));
     const Tensor* X = ctx.Input<Tensor>("Input");
     const Tensor* dY = ctx.Input<Tensor>("DOutput");
     const Tensor* ddX = ctx.Input<Tensor>("DDInput");
@@ -982,11 +983,20 @@ class DepthwiseConvKernel : public framework::OpKernel<T> {
       PADDLE_ENFORCE_EQ(
           output->dims()[output->dims().size() - 1] %
               input->dims()[input->dims().size() - 1],
-          0, "The output channels must be a multiple of the input channels");
+          0, platform::errors::InvalidArgument(
+                 "ShapeError: The output channels must be a multiple of the "
+                 "input channels. But receivced output channel number is %d "
+                 "and input channel number is %d",
+                 output->dims()[output->dims().size() - 1],
+                 input->dims()[input->dims().size() - 1]));
     } else {
       PADDLE_ENFORCE_EQ(
           output->dims()[1] % input->dims()[1], 0,
-          "The output channels must be a multiple of the input channels");
+          platform::errors::InvalidArgument(
+              "ShapeError: The output channels must be a multiple of the "
+              "input channels. But receivced output channel number is %d "
+              "and input channel number is %d",
+              output->dims()[1], input->dims()[1]));
     }
     // transform tensor
     Tensor transformed_input(input->type());
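The DepthwiseConvKernel hunk above keeps the original rule, output channels must be a multiple of input channels, and only upgrades the message so it reports the two offending values. A small standalone illustration of the arithmetic behind that check; the shapes are hypothetical, not taken from the commit:

    #include <cstdio>
    #include <vector>

    int main() {
      // NHWC layout: the channel count is the last dimension.
      std::vector<int> input_dims = {8, 32, 32, 3};   // C_in = 3
      std::vector<int> output_dims = {8, 32, 32, 9};  // C_out = 9
      int c_in = input_dims.back();
      int c_out = output_dims.back();
      if (c_out % c_in != 0) {
        // This is the case the kernel now reports as InvalidArgument,
        // including both channel numbers in the message.
        std::printf("invalid: output channels %d, input channels %d\n", c_out, c_in);
      } else {
        std::printf("ok: %d is a multiple of %d\n", c_out, c_in);
      }
      return 0;
    }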
paddle/fluid/operators/conv_transpose_cudnn_op.cu
@@ -51,8 +51,9 @@ template <typename T>
 class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto* input = ctx.Input<Tensor>("Input");
     auto* filter = ctx.Input<Tensor>("Filter");
     auto* output = ctx.Output<Tensor>("Output");
@@ -145,9 +146,8 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
               ctx, input_pad, input_transpose, pad_value, &transformed_input);
         } break;
         default:
-          PADDLE_ENFORCE_EQ(
-              rank == 4 || rank == 5, true,
-              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
       }
     } else {
       transformed_input = input_transpose;
@@ -290,8 +290,9 @@ template <typename T>
 class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
     auto input = ctx.Input<Tensor>("Input");
     auto filter = ctx.Input<Tensor>("Filter");
     auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
@@ -393,9 +394,8 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
               &transformed_output_grad);
         } break;
         default:
-          PADDLE_ENFORCE_EQ(
-              rank == 4 || rank == 5, true,
-              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
       }
     } else {
       transformed_output_grad = output_grad_transpose;
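Both conv_transpose hunks above touch the default branch of a switch on the input rank. That branch is only reached when the rank is neither 4 nor 5, so the old PADDLE_ENFORCE_EQ(rank == 4 || rank == 5, true, ...) was enforcing a condition that is always false there; the commit replaces it with an unconditional PADDLE_THROW. A sketch of the dispatch shape, with the case bodies elided (the surrounding switch is assumed from context and is not shown in the hunks):

    // Rank dispatch as in the ConvTranspose cuDNN kernels; case bodies elided.
    switch (rank) {
      case 4: { /* pad inputs and run the 4-D path */ } break;
      case 5: { /* pad inputs and run the 5-D path */ } break;
      default:
        // Reached only for unsupported ranks, so throw unconditionally.
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
    }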
paddle/fluid/operators/conv_transpose_op.h
@@ -580,7 +580,12 @@ class DepthwiseConvTransposeKernel : public framework::OpKernel<T> {
     output->mutable_data<T>(context.GetPlace());
     int groups = context.Attr<int>("groups");
-    PADDLE_ENFORCE_EQ(groups, filter.dims()[0]);
+    PADDLE_ENFORCE_EQ(
+        groups, filter.dims()[0],
+        platform::errors::InvalidArgument(
+            "groups should be error to the 1st dimension of filter. But "
+            "received groups is %d and filter dimension[0] is %d",
+            groups, filter.dims()[0]));
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
@@ -588,7 +593,10 @@ class DepthwiseConvTransposeKernel : public framework::OpKernel<T> {
     std::string padding_algorithm =
         context.Attr<std::string>("padding_algorithm");
     for (auto v : dilations) {
-      PADDLE_ENFORCE_EQ(v, 1);
+      PADDLE_ENFORCE_EQ(
+          v, 1, platform::errors::InvalidArgument(
+                    "dilations should be 1 in depthwise conv. "
+                    "But received dilations is %d", v));
     }
     auto in_dims = input->dims();
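The two DepthwiseConvTransposeKernel hunks above keep their original preconditions, groups must equal the first filter dimension and every dilation must be 1, and add the offending values to the messages. A small standalone illustration of the two conditions; the example values are hypothetical:

    #include <cstdio>
    #include <vector>

    int main() {
      int groups = 4;
      std::vector<int> filter_dims = {4, 1, 3, 3};  // filter.dims()[0] must equal groups
      std::vector<int> dilations = {1, 1};          // every entry must be 1

      if (groups != filter_dims[0]) {
        std::printf("invalid: groups %d vs filter dimension[0] %d\n", groups, filter_dims[0]);
        return 1;
      }
      for (int v : dilations) {
        if (v != 1) {
          std::printf("invalid: dilation %d, depthwise conv requires 1\n", v);
          return 1;
        }
      }
      std::printf("both depthwise conv transpose checks pass\n");
      return 0;
    }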