Commit f4bb60ae
Refine NNPACKConvOpTest.
Authored by hedaoyuan on Aug 10, 2017
Parent: 1d74d16c
Showing 2 changed files with 22 additions and 80 deletions (+22, -80):

  paddle/function/ConvOpTest.h                  +14  -3
  paddle/function/nnpack/NNPACKConvOpTest.cpp    +8  -77
paddle/function/ConvOpTest.h
@@ -80,6 +80,12 @@ void Convolution(const std::string& conv1,
       for (size_t stride : {1, 2}) {
         for (size_t padding : {0, 1}) {
           if (padding >= filterSize) break;
+          // NNPACK only supports stride = 1 if batchSize > 1
+          if ((conv1 == "NNPACKConv-CPU" || conv2 == "NNPACKConv-CPU") &&
+              batchSize > 1 && stride > 1)
+            break;
+
           size_t outputSize =
               (inputSize - filterSize + 2 * padding + stride) / stride;
           VLOG(3) << " batchSize=" << batchSize
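For reference, the outputSize expression used in these test loops is the standard convolution output-size formula, floor((inputSize - filterSize + 2 * padding) / stride) + 1, written so that the +1 is folded into the integer division by adding stride to the numerator. A worked example with illustrative values: inputSize = 14, filterSize = 3, padding = 1, stride = 2 gives (14 - 3 + 2 * 1 + 2) / 2 = 15 / 2 = 7 in integer arithmetic.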
@@ -102,7 +108,7 @@ void Convolution(const std::string& conv1,
                         .set("paddings", paddings)
                         .set("strides", strides)
                         .set("groups", (size_t)1)
-                        .set("algo", "auto"));
+                        .set("algo", (std::string)"auto"));
       TensorShape input{batchSize, inputChannels, inputSize, inputSize};
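The cast to (std::string) on the "auto" literal is presumably needed because FuncConfig::set is templated on the value type, so a bare literal would otherwise be stored as const char* rather than std::string; this is an inference from the call site, not confirmed against FuncConfig itself. A minimal standalone sketch of the deduction difference (not Paddle code):

#include <iostream>
#include <string>
#include <typeinfo>

// Stand-in for a templated config setter; the only point is that a bare
// string literal deduces as const char*, while the cast yields std::string.
template <typename T>
void set(const char* key, T value) {
  std::cout << key << "=" << value << " stored as " << typeid(T).name() << "\n";
}

int main() {
  set("algo", "auto");               // T deduced as const char*
  set("algo", (std::string)"auto");  // T deduced as std::string
  return 0;
}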
@@ -163,7 +169,7 @@ void Convolution2(const std::string& conv1,
                         .set("paddings", paddings)
                         .set("strides", strides)
                         .set("groups", (size_t)1)
-                        .set("algo", "auto"));
+                        .set("algo", (std::string)"auto"));
       TensorShape input{batchSize, inputChannels, inputHeight, inputWidth};
@@ -196,6 +202,11 @@ void DepthwiseConvolution(const std::string& conv1,
     for (size_t outputChannels : {32, 64}) {
       for (size_t stride : {1, 2}) {
         for (size_t padding : {0, 1}) {
+          // NNPACK only supports stride = 1 if batchSize > 1
+          if ((conv1 == "NNPACKConv-CPU" || conv2 == "NNPACKConv-CPU") &&
+              batchSize > 1 && stride > 1)
+            break;
+
           size_t outputSize =
               (inputSize - filterSize + 2 * padding + stride) / stride;
           VLOG(3) << " batchSize=" << batchSize
@@ -219,7 +230,7 @@ void DepthwiseConvolution(const std::string& conv1,
                         .set("paddings", paddings)
                         .set("strides", strides)
                         .set("groups", groups)
-                        .set("algo", "auto"));
+                        .set("algo", (std::string)"auto"));
       TensorShape input{batchSize, inputChannels, inputSize, inputSize};
paddle/function/nnpack/NNPACKConvOpTest.cpp
@@ -13,87 +13,18 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include <gtest/gtest.h>
-#include "paddle/function/Function.h"
-#include "paddle/function/FunctionTest.h"
-
-DEFINE_string(algo,
-              "auto",
-              "The algorithm (auto, ft8x8, ft16x16, wt8x8, "
-              "implicit-gemm, or direct) for computing convolution of NNPACK.");
+#include "paddle/function/ConvOpTest.h"

 namespace paddle {

-#define IS_NNPACK_SUPPORT(algo, filterSize, stride)                       \
-  if (algo == "direct" && filterSize != 1) continue;                      \
-  if (algo == "direct" && batchSize != 1) continue;                       \
-  if (algo == "wt8x8" && filterSize != 3) continue;                       \
-  if (algo == "implicit-gemm" && batchSize != 1) continue;                \
-  if (algo != "auto" && algo != "implicit-gemm" && stride > 1) continue;
-
-class ConvolutionTest {
-public:
-  ConvolutionTest(const std::string& conv1,
-                  const std::string& conv2,
-                  std::string algo = "auto") {
-    for (size_t batchSize : {1, 32}) {
-      for (size_t inputSize : {7, 14, 54}) {
-        for (size_t filterSize : {1, 3, 5}) {
-          for (size_t inputChannels : {3, 64}) {
-            for (size_t outputChannels : {3, 64, 128}) {
-              if (inputChannels < outputChannels) break;
-              for (size_t stride : {1, 2}) {
-                // if batchSize > 1 NNPACKConv only supports stride = 1
-                if (batchSize > 1 && stride > 1) break;
-                for (size_t padding : {0, 1}) {
-                  if (padding >= filterSize) break;
-                  size_t outputSize =
-                      (inputSize - filterSize + 2 * padding + stride) / stride;
-                  IS_NNPACK_SUPPORT(algo, filterSize, stride);
-                  LOG(INFO) << " batchSize=" << batchSize
-                            << " inputChannels=" << inputChannels
-                            << " inputHeight=" << inputSize
-                            << " inputWidth=" << inputSize
-                            << " outputChannels=" << outputChannels
-                            << " filterHeight=" << filterSize
-                            << " filterWidth=" << filterSize
-                            << " outputHeight=" << outputSize
-                            << " outputWidth=" << outputSize
-                            << " stride=" << stride
-                            << " padding=" << padding;
-
-                  std::vector<size_t> paddings = {padding, padding};
-                  std::vector<size_t> strides = {stride, stride};
-                  Compare2Function<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test(
-                      conv1,
-                      conv2,
-                      FuncConfig()
-                          .set("paddings", paddings)
-                          .set("strides", strides)
-                          .set("groups", (size_t)1)
-                          .set("algo", algo));
-
-                  TensorShape shape0{
-                      batchSize, inputChannels, inputSize, inputSize};
-                  TensorShape shape1{
-                      outputChannels, inputChannels, filterSize, filterSize};
-                  TensorShape shape2{
-                      batchSize, outputChannels, outputSize, outputSize};
-                  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape0));
-                  test.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape1));
-                  test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, shape2));
-                  test.run();
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-};
+TEST(NNPACK, Forward) {
+  Convolution<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU>(
+      "GemmConv-CPU", "NNPACKConv-CPU", forward);
+}

-TEST(Convolution, NNPACK) {
-  // NNPACK only supports stride = 1
-  ConvolutionTest test("GemmConv-CPU", "NNPACKConv-CPU", FLAGS_algo);
-}
+TEST(NNPACK, Depthwise) {
+  DepthwiseConvolution<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU>(
+      "GemmConv-CPU", "NNPACKConv-CPU", forward);
+}

 }  // namespace paddle
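With the shared helpers in ConvOpTest.h, a comparison test for any other registered CPU convolution function could be written the same way as the two NNPACK tests above. A hypothetical sketch; the backend name "SomeOtherConv-CPU" is made up for illustration and is not part of this commit:

#include <gtest/gtest.h>
#include "paddle/function/ConvOpTest.h"

namespace paddle {

// Hypothetical: compare a made-up "SomeOtherConv-CPU" backend against the
// GemmConv-CPU reference, mirroring the NNPACK tests in this commit.
TEST(SomeOtherConv, Forward) {
  Convolution<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU>(
      "GemmConv-CPU", "SomeOtherConv-CPU", forward);
}

}  // namespace paddle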