Commit 44376f70
Authored Jun 11, 2021 by Megvii Engine Team
Committed by huangxinda on Jul 19, 2021
refactor(mgb): make conv-backward-data handle noncontiguous tensors
GitOrigin-RevId: 0a8f66f9d378b6466bc383a94c57ec80bcc5cb74
Parent: 7b2a76d1
Showing 9 changed files with 75 additions and 9 deletions (+75 -9)
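Before the per-file diffs, a note on the one concept the whole commit turns on: a tensor layout is contiguous when its elements occupy a single dense block of memory, i.e. each dimension's stride equals the product of the sizes of all dimensions after it. The diffs below gate certain formats on TensorLayout::is_contiguous(); the stand-alone sketch below (a hypothetical helper, not MegDNN's actual implementation) shows what such a check computes.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Hypothetical stand-alone analogue of TensorLayout::is_contiguous():
    // a layout is dense iff stride[i] == product of shape[i+1..n-1].
    static bool is_contiguous(const std::vector<std::size_t>& shape,
                              const std::vector<std::size_t>& stride) {
        std::size_t expect = 1;
        for (std::size_t i = shape.size(); i-- > 0;) {
            if (stride[i] != expect)
                return false;
            expect *= shape[i];
        }
        return true;
    }

    int main() {
        std::printf("%d\n", is_contiguous({2, 3, 4}, {12, 4, 1}));  // 1: dense
        std::printf("%d\n", is_contiguous({2, 3, 4}, {24, 4, 1}));  // 0: strided batch
    }

Read this way, the pattern in the cuDNN-facing changes is consistent: for NCHW and NHWC, strided operands are passed through (cuDNN descriptors carry explicit strides), while other filter formats presumably still need dense buffers, so those paths return false for noncontiguous src/dst (or diff/grad) layouts.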
dnn/src/cuda/conv_bias/cudnn_conv.cpp (+8, -0)
dnn/src/cuda/conv_bias/cudnn_conv_bias_activation.cpp (+6, -3)
dnn/src/cuda/convolution/backward_data/algo.cpp (+2, -2)
dnn/src/cuda/convolution/backward_data/cudnn.cpp (+8, -0)
dnn/src/cuda/convolution/backward_data/implicit_gemm_int8_nchw4_dp4a.cpp (+5, -0)
dnn/src/cuda/convolution/backward_data/implicit_gemm_int8_nchw_dp4a.cpp (+5, -0)
dnn/src/cuda/convolution/backward_data/matmul.cpp (+2, -2)
dnn/src/naive/convolution/helper.h (+0, -2)
dnn/test/cuda/convolution.cpp (+39, -0)
dnn/src/cuda/conv_bias/cudnn_conv.cpp

@@ -23,6 +23,14 @@ bool ConvBiasForwardImpl::AlgoCUDNNConv::is_available(
     if (args.z_layout->ndim > 0)
         return false;

+    if (args.filter_meta.format != Param::Format::NCHW &&
+        args.filter_meta.format != Param::Format::NHWC) {
+        if (!args.src_layout->is_contiguous() ||
+            !args.dst_layout->is_contiguous()) {
+            return false;
+        }
+    }
+
     auto dst_layout = *args.dst_layout;
     if (dst_layout.dtype.enumv() != args.bias_layout->dtype.enumv()) {
         dst_layout.dtype = DType();
...
dnn/src/cuda/conv_bias/cudnn_conv_bias_activation.cpp

@@ -24,9 +24,12 @@ using namespace conv_bias;

 bool ConvBiasForwardImpl::AlgoCUDNNConvBiasActivation::is_available(
         const SizeArgs& args) const {
-    if (!args.src_layout->is_contiguous() ||
-        !args.dst_layout->is_contiguous()) {
-        return false;
+    if (args.filter_meta.format != Param::Format::NCHW &&
+        args.filter_meta.format != Param::Format::NHWC) {
+        if (!args.src_layout->is_contiguous() ||
+            !args.dst_layout->is_contiguous()) {
+            return false;
+        }
     }
     if ((args.src_layout->dtype.enumv() == DTypeEnum::QuantizedS4 ||
          args.src_layout->dtype.enumv() == DTypeEnum::Quantized4Asymm) &&
...
dnn/src/cuda/convolution/backward_data/algo.cpp

@@ -82,8 +82,8 @@ ConvolutionBackwardDataImpl::AlgoPack ConvolutionBackwardDataImpl::sm_algo_pack;

 ConvolutionBackwardDataImpl::AlgoBase::SizeArgs::SizeArgs(
         ConvolutionBackwardDataImpl* o, const TensorLayout& filter,
         const TensorLayout& diff, const TensorLayout& grad)
-        : SizeArgs(o, filter, o->check_layout_fwd(grad, filter, diff), diff,
-                   grad) {}
+        : SizeArgs(o, filter, o->make_canonized_filter_meta(grad.ndim, filter), diff,
+                   grad) {}

 ConvolutionBackwardDataImpl::AlgoBase::SizeArgs::SizeArgs(
         ConvolutionBackwardDataImpl* o, const TensorLayout& filter,
...
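A plausible reading of this constructor change (mirrored in matmul.cpp below): check_layout_fwd re-validates the equivalent forward problem from the (grad, filter, diff) triple, which bakes in assumptions about dense layouts, whereas make_canonized_filter_meta derives the canonical filter description from the rank and the filter layout alone. With the latter, strided diff/grad layouts survive construction of SizeArgs and are instead accepted or rejected by each algorithm's is_available, which is exactly where this commit adds explicit contiguity checks.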
dnn/src/cuda/convolution/backward_data/cudnn.cpp

@@ -21,6 +21,14 @@ using namespace convolution;

 bool ConvolutionBackwardDataImpl::AlgoCUDNN::is_available(
         const SizeArgs& args) const {
+    if (args.filter_meta.format != Param::Format::NCHW &&
+        args.filter_meta.format != Param::Format::NHWC) {
+        if (!args.grad_layout->is_contiguous() ||
+            !args.diff_layout->is_contiguous()) {
+            return false;
+        }
+    }
+
     CUDNNBwdDataDescs D;

     if (!is_cudnn_supported(args.as_fwd_args()))
...
dnn/src/cuda/convolution/backward_data/implicit_gemm_int8_nchw4_dp4a.cpp

@@ -25,6 +25,11 @@ bool ConvolutionBackwardDataImpl::AlgoInt8NCHW4DotProdImplicitGemm::
     if (fm.format != Param::Format::NCHW4)
         return false;
+    if (!args.grad_layout->is_contiguous() ||
+        !args.diff_layout->is_contiguous()) {
+        return false;
+    }
+
     bool available = true;
     auto src_dtype = args.diff_layout->dtype,
...
dnn/src/cuda/convolution/backward_data/implicit_gemm_int8_nchw_dp4a.cpp

@@ -25,6 +25,11 @@ bool ConvolutionBackwardDataImpl::AlgoInt8NCHWDotProdImplicitGemm::
     if (fm.format != Param::Format::NCHW)
         return false;
+    if (!args.grad_layout->is_contiguous() ||
+        !args.diff_layout->is_contiguous()) {
+        return false;
+    }
+
     bool available = true;
     auto src_dtype = args.diff_layout->dtype,
...
dnn/src/cuda/convolution/backward_data/matmul.cpp

@@ -64,8 +64,8 @@ ConvolutionBackwardDataImpl::AlgoMatmul::get_subopr_list(
         const TensorLayoutArray& layouts, const OperatorBase* opr) const {
     const ConvolutionBackwardDataImpl* conv_backward_data_opr =
             static_cast<const ConvolutionBackwardDataImpl*>(opr);
-    CanonizedFilterMeta fm = conv_backward_data_opr->check_layout_fwd(
-            layouts[2], layouts[0], layouts[1]);
+    CanonizedFilterMeta fm = conv_backward_data_opr->make_canonized_filter_meta(
+            layouts[2].ndim, layouts[0]);
     auto&& config = sub_opr_config(
             fm, layouts[0], layouts[1], layouts[2], conv_backward_data_opr);
...
dnn/src/naive/convolution/helper.h

@@ -661,7 +661,6 @@ template <typename ftype, typename dtype, typename gtype>
 void backward_data(_megdnn_tensor_in filter, _megdnn_tensor_in diff,
                    _megdnn_tensor_out grad,
                    const Convolution::CanonizedFilterMeta& filter_meta) {
-    megdnn_assert(grad.layout.is_contiguous());
     memset(grad.raw_ptr, 0, grad.layout.span().dist_byte());
     megdnn_assert(filter_meta.spatial_ndim == 2);
     if (filter_meta.format == param::Convolution::Format::NHWCD4) {
...

@@ -676,7 +675,6 @@ template <typename stype, typename dtype, typename gtype>
 void backward_filter(_megdnn_tensor_in src, _megdnn_tensor_in diff,
                      _megdnn_tensor_out grad,
                      const Convolution::CanonizedFilterMeta& filter_meta) {
-    megdnn_assert(grad.layout.is_contiguous());
     memset(grad.raw_ptr, 0, grad.layout.span().dist_byte());
     megdnn_assert(filter_meta.spatial_ndim == 2);
     compute2d<stype, gtype, dtype, dtype, StrategyBwdFlt>(
...
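Both hunks drop only the contiguity assertion on grad; the memset that follows is already stride-aware because span().dist_byte() covers the entire address range the layout touches, holes included (harmless in the tests below, where grad owns its buffer, and the naive kernels then accumulate into every element of the view). A minimal sketch of that span arithmetic, assuming non-negative strides (a hypothetical helper, not MegDNN's implementation):

    #include <cstddef>
    #include <vector>

    // Elements between the lowest and one past the highest address reachable
    // by a strided layout; multiply by the element size for a dist_byte()
    // analogue. Assumes all strides are non-negative.
    static std::size_t span_elems(const std::vector<std::size_t>& shape,
                                  const std::vector<std::size_t>& stride) {
        std::size_t last = 0;  // offset of the highest-addressed element
        for (std::size_t i = 0; i < shape.size(); ++i)
            last += (shape[i] - 1) * stride[i];
        return last + 1;
    }

    // For the test layout below -- shape {2, 16, 7, 7}, strides
    // {1568, 49, 7, 1} -- this gives 1*1568 + 15*49 + 6*7 + 6*1 + 1 = 2352
    // elements, so zeroing the span clears every element the backward pass
    // writes into, dense or not.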
dnn/test/cuda/convolution.cpp

@@ -238,6 +238,25 @@ TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA) {
     }
 }

+TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_CUDNN) {
+    if (cuda::is_compute_capability_required(7, 0))
+        return;
+    using namespace convolution;
+    Checker<ConvolutionBackwardData> checker(handle_cuda());
+    checker.set_before_exec_callback(
+            AlgoChecker<ConvolutionBackwardData>("CUDNN_CONVOLUTION"));
+    //! noncontiguous case
+    {
+        param::Convolution param;
+        param.pad_h = param.pad_w = 1;
+        checker.set_param(param).execl(TensorLayoutArray{
+                {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::Float32()},
+                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
+                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
+        });
+    }
+}
+
 TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_MATMUL) {
     using namespace convolution;
     std::vector<TestArg> args = get_args_cuda_conv_bwd_data();
...

@@ -265,6 +284,16 @@ TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_MATMUL) {
                 .set_param(arg.param)
                 .exec(TensorLayoutArray{filter, dst, src});
     }
+    //! noncontiguous case
+    {
+        param::Convolution param;
+        param.pad_h = param.pad_w = 1;
+        checker.set_param(param).execl(TensorLayoutArray{
+                {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::Float32()},
+                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
+                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
+        });
+    }
 }

 TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_INT8_NCHW4_DP4A) {
...

@@ -355,6 +384,16 @@ TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_INT8_NCHW_DP4A) {
         }
         checker.set_rng(0, &rng).set_rng(1, &rng).set_param(arg.param).exec(
                 TensorLayoutArray{filter, dst, src});
     }
+    //! noncontiguous case
+    {
+        param::Convolution param;
+        param.pad_h = param.pad_w = 1;
+        checker.set_param(param).execl(TensorLayoutArray{
+                {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::QuantizedS8{1.3f}},
+                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::QuantizedS8{1.2f}},
+                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::QuantizedS8{1.2f}}});
+    }
 }
...
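Two details of the new cases are easy to miss. First, the shape-driven cases go through exec, which lets the Checker synthesize layouts, while the noncontiguous cases use execl, which passes complete TensorLayouts, strides and all; only the latter can express a noncontiguous operand. Second, the strides are chosen so that just diff and grad are strided: for shape {2, 16, 7, 7} in NCHW order the dense strides would be {16*7*7, 7*7, 7, 1} = {784, 49, 7, 1}, and the tests use a batch stride of 1568 = 2*784, i.e. the layout of a view that takes every other batch of a {4, 16, 7, 7} buffer. The filter layout {16, 16, 3, 3} with strides {144, 9, 3, 1} is dense (16*3*3 = 144), so the filter itself does not exercise the new checks.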