机器未来 / Paddle (forked from PaddlePaddle/Paddle)
Commit 1f516fa0
Authored July 19, 2017 by xzl

modify format, and modify the layer grad test, op test

Parent: 81998868
Showing 3 changed files with 168 additions and 243 deletions.

paddle/function/ConvOpTest.cpp              +152  -231
paddle/gserver/layers/ExpandConvLayer.cpp     +9    -8
paddle/gserver/tests/test_LayerGrad.cpp       +7    -4
paddle/function/ConvOpTest.cpp
@@ -25,40 +25,32 @@ enum TestType {
   kBackwardFilterTest = 2,
 };
 
-enum LayerType {
-  convolutionType = 0,
-  depthwiseConvolutionType = 1,
-};
-
 template <DeviceType DType1, DeviceType DType2>
 class ConvolutionTest {
 public:
   ConvolutionTest(const std::string& conv1,
                   const std::string& conv2,
-                  LayerType layerType,
                   TestType type,
+                  bool useGroups = true,
                   std::string algo = "auto") {
     for (size_t batchSize : {1, 32}) {
       for (size_t inputSize : {7, 14, 54}) {
         for (size_t filterSize : {1, 3, 5}) {
           for (size_t inputChannels : {3, 64}) {
             for (size_t outputChannels : {3, 64, 128}) {
+              for (size_t groups : {1, 3, 64}) {
                 if (inputChannels > outputChannels) break;
-                if (layerType == depthwiseConvolutionType &&
-                    outputChannels % inputChannels != 0)
-                  break;
-                size_t groups = 1;
-                if (layerType == depthwiseConvolutionType) {
-                  groups = inputChannels;
-                }
+                if (groups != 1 &&
+                    (inputChannels != groups || outputChannels % groups != 0))
+                  continue;
+                if (!useGroups) groups = 1;
 
                 for (size_t stride : {1, 2}) {
                   for (size_t padding : {0, 1}) {
                     if (padding >= filterSize) break;
                     size_t outputSize =
                         (inputSize - filterSize + 2 * padding + stride) / stride;
                     VLOG(3) << " batchSize=" << batchSize
                             << " inputChannels=" << inputChannels
                             << " inputHeight=" << inputSize
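For reference, the spatial output size used throughout the sweep above is the usual convolution arithmetic, computed in the loop as (inputSize - filterSize + 2 * padding + stride) / stride. A small standalone sketch (illustrative, not part of the diff) checking two configurations from the sweep:

    #include <cassert>
    #include <cstddef>

    // Same formula as the loop above; for non-negative (inputSize - filterSize + 2*padding)
    // it equals floor((inputSize - filterSize + 2*padding) / stride) + 1.
    static std::size_t convOutputSize(std::size_t inputSize, std::size_t filterSize,
                                      std::size_t padding, std::size_t stride) {
      return (inputSize - filterSize + 2 * padding + stride) / stride;
    }

    int main() {
      assert(convOutputSize(14, 3, 1, 2) == 7);  // 14x14 input, 3x3 filter, pad 1, stride 2
      assert(convOutputSize(7, 5, 0, 1) == 3);   // 7x7 input, 5x5 filter, no padding
      return 0;
    }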
@@ -85,10 +77,10 @@ public:
                 batchSize, inputChannels, inputSize, inputSize};
             TensorShape filter;
-            if (layerType == depthwiseConvolutionType)
+            if (groups > 1)
               filter = TensorShape({groups,
                                     outputChannels / groups,
-                                    (size_t)1,
+                                    inputChannels / groups,
                                     filterSize,
                                     filterSize});
             else
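As read from the hunk above, the grouped filter carries a leading groups dimension followed by the per-group output and input depths; depthwise is simply the case groups == inputChannels, where each group sees a single input channel. A standalone arithmetic sketch of that layout (names are illustrative, not Paddle API):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Illustrative only: grouped filter dimensions following the
    // {groups, outputChannels/groups, inputChannels/groups, kh, kw} layout above.
    static std::vector<std::size_t> groupedFilterShape(std::size_t inputChannels,
                                                       std::size_t outputChannels,
                                                       std::size_t groups,
                                                       std::size_t k) {
      return {groups, outputChannels / groups, inputChannels / groups, k, k};
    }

    int main() {
      // Depthwise case from the sweep: groups == inputChannels == 64.
      std::vector<std::size_t> depthwise = groupedFilterShape(64, 64, 64, 3);
      assert(depthwise[2] == 1);  // each group convolves a single input channel
      // Ordinary (ungrouped) case: groups == 1.
      std::vector<std::size_t> dense = groupedFilterShape(3, 64, 1, 5);
      assert(dense[1] == 64 && dense[2] == 3);
      return 0;
    }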
@@ -107,7 +99,8 @@ public:
     } else if (type == kBackwardInputTest) {
       test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
       test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
-      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
+      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input),
+                      ADD_TO);
       test.run();
     } else if (type == kBackwardFilterTest) {
       test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
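ADD_TO on the gradient output asks the compared functions to accumulate into the existing buffer rather than overwrite it. A generic illustration of that distinction (not Paddle's BufferArg/ArgType API):

    #include <cassert>
    #include <vector>

    // Generic sketch of assign vs. add-to semantics for a gradient buffer:
    // with accumulation, a second backward pass adds onto what is already there.
    enum class ArgMode { kAssign, kAddTo };

    static void writeGrad(std::vector<float>& buf, float value, ArgMode mode) {
      for (float& v : buf) {
        v = (mode == ArgMode::kAddTo) ? v + value : value;
      }
    }

    int main() {
      std::vector<float> grad(4, 1.0f);
      writeGrad(grad, 0.5f, ArgMode::kAssign);
      assert(grad[0] == 0.5f);             // previous contents replaced
      writeGrad(grad, 0.5f, ArgMode::kAddTo);
      assert(grad[0] == 1.0f);             // accumulated on top of the assigned value
      return 0;
    }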
@@ -123,6 +116,7 @@ public:
               }
             }
+            }
           }
         }
 };
 
 // Mainly used to test cases where the height and width (input, filter)
@@ -132,8 +126,8 @@ class ConvolutionTest2 {
 public:
   ConvolutionTest2(const std::string& conv1,
                    const std::string& conv2,
-                   LayerType layerType,
                    TestType type,
+                   bool useGroups = true,
                    std::string algo = "auto") {
     for (size_t batchSize : {16}) {
       for (size_t inputHeight : {7, 31}) {
@@ -142,15 +136,13 @@ public:
           for (size_t filterWidth : {3, 7}) {
             for (size_t inputChannels : {7}) {
               for (size_t outputChannels : {7, 32}) {
-                if (layerType == depthwiseConvolutionType &&
-                    outputChannels % inputChannels != 0)
-                  break;
-                size_t groups = 1;
-                if (layerType == depthwiseConvolutionType) {
-                  groups = inputChannels;
-                }
+                for (size_t groups : {1, 7}) {
+                  if (!useGroups && groups != 1 &&
+                      (inputChannels != groups || outputChannels % groups != 0))
+                    continue;
+                  if (!useGroups) groups = 1;
                   size_t stride = 1;
                   size_t padding = 0;
                   size_t outputHeight =
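Both the removed depthwise check and the groups filter in the sweeps reduce to the same constraint: with groups equal to inputChannels, outputChannels must be an exact multiple of the channel count. A self-contained check (illustrative only, not Paddle code):

    #include <cassert>
    #include <cstddef>

    // Mirrors the sweep's filter for groups != 1: a configuration is kept only
    // when groups equals inputChannels and divides outputChannels evenly.
    static bool validGroupedConfig(std::size_t inputChannels,
                                   std::size_t outputChannels,
                                   std::size_t groups) {
      return inputChannels == groups && outputChannels % groups == 0;
    }

    int main() {
      assert(validGroupedConfig(7, 7, 7));            // depthwise, channel multiplier 1
      assert(validGroupedConfig(7, 32, 7) == false);  // 32 is not a multiple of 7
      assert(validGroupedConfig(3, 64, 3) == false);  // 64 % 3 != 0, skipped by the sweep
      return 0;
    }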
@@ -185,10 +177,10 @@ public:
                 batchSize, inputChannels, inputHeight, inputWidth};
             TensorShape filter;
-            if (layerType == depthwiseConvolutionType)
+            if (groups > 1)
               filter = TensorShape({groups,
                                     outputChannels / groups,
-                                    (size_t)1,
+                                    inputChannels / groups,
                                     filterHeight,
                                     filterWidth});
             else
@@ -207,7 +199,8 @@ public:
     } else if (type == kBackwardInputTest) {
       test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
       test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
-      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
+      test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input),
+                      ADD_TO);
       test.run();
     } else if (type == kBackwardFilterTest) {
       test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
@@ -223,109 +216,37 @@ public:
               }
             }
+            }
           }
         }
 };
 
 // ======Start Convolution TEST======
 
 TEST(Forward, GEMM) {
   ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test(
-      "NaiveConv-CPU", "GemmConv-CPU", convolutionType, kForwardTest);
+      "NaiveConv-CPU", "GemmConv-CPU", kForwardTest, false);
   ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test2(
-      "NaiveConv-CPU", "GemmConv-CPU", convolutionType, kForwardTest);
+      "NaiveConv-CPU", "GemmConv-CPU", kForwardTest, false);
 }
 
 #ifndef PADDLE_ONLY_CPU
 TEST(Forward, GEMM2) {
   ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
-      "GemmConv-CPU", "GemmConv-GPU", convolutionType, kForwardTest);
+      "GemmConv-CPU", "GemmConv-GPU", kForwardTest);
   ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "GemmConv-CPU", "GemmConv-GPU", convolutionType, kForwardTest);
+      "GemmConv-CPU", "GemmConv-GPU", kForwardTest);
 }
 
 TEST(BackwardInput, GEMM) {
   ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
-      "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", convolutionType, kBackwardInputTest);
+      "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest);
   ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", convolutionType, kBackwardInputTest);
+      "GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest);
 }
 
 TEST(BackwardFilter, GEMM) {
   ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
-      "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", convolutionType, kBackwardFilterTest);
+      "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest);
   ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", convolutionType, kBackwardFilterTest);
+      "GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest);
 }
 #endif
 // ======End Convolution TEST======
 
-// ======Start DepthwiseConvolution TEST======
-// TODO(zhaolong) The depthwise convolution cpu test will be added when the cpu
-// version of depthwiseConv is implemented.
-
-#ifndef PADDLE_ONLY_CPU
-TEST(DepthwiseConvForward, GEMM) {
-  ConvolutionTest<DEVICE_TYPE_GPU, DEVICE_TYPE_GPU> test(
-      "GemmConv-GPU", "DepthwiseConv-GPU", depthwiseConvolutionType, kForwardTest);
-  ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "GemmConv-GPU", "DepthwiseConv-GPU", depthwiseConvolutionType, kForwardTest);
-}
-
-TEST(DepthwiseConvForward, GEMM2) {
-  ConvolutionTest<DEVICE_TYPE_GPU, DEVICE_TYPE_GPU> test(
-      "DepthwiseConv-GPU", "DepthwiseConv-GPU", depthwiseConvolutionType, kForwardTest);
-  ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "DepthwiseConv-GPU", "DepthwiseConv-GPU", depthwiseConvolutionType, kForwardTest);
-}
-
-TEST(DepthwiseConvBackwardInput, GEMM) {
-  ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
-      "DepthwiseConvGradInput-GPU", "DepthwiseConvGradInput-GPU", depthwiseConvolutionType, kBackwardInputTest);
-  ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "DepthwiseConvGradInput-GPU", "DepthwiseConvGradInput-GPU", depthwiseConvolutionType, kBackwardInputTest);
-}
-
-TEST(DepthwiseConvBackwardFilter, GEMM) {
-  ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
-      "DepthwiseConvGradFilter-GPU", "DepthwiseConvGradFilter-GPU", depthwiseConvolutionType, kBackwardFilterTest);
-  ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
-      "DepthwiseConvGradFilter-GPU", "DepthwiseConvGradFilter-GPU", depthwiseConvolutionType, kBackwardFilterTest);
-}
-#endif
-// ======End DepthwiseConvolution TEST======
 
 }  // namespace paddle
paddle/gserver/layers/ExpandConvLayer.cpp
@@ -39,21 +39,22 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
   filterShape_.resize(numInputs);
   outputShape_.resize(numInputs);
-  string convType;
-  string convGradInputType;
-  string convGradFilterType;
+  std::string convType;
+  std::string convGradInputType;
+  std::string convGradFilterType;
   for (int i = 0; i < config_.inputs_size(); i++) {
     std::vector<size_t> paddings = {(size_t)paddingY_[i], (size_t)padding_[i]};
     std::vector<size_t> strides = {(size_t)strideY_[i], (size_t)stride_[i]};
     if (useGpu_ && (size_t)groups_[i] == (size_t)channels_[i] && !isDeconv_) {
-      convType = "DepthwiseConv"
-      convGradInputType = "DepthwiseConvGradInput"
-      convGradFilterType = "DepthwiseConvGradFilter"
+      convType = "DepthwiseConv";
+      convGradInputType = "DepthwiseConvGradInput";
+      convGradFilterType = "DepthwiseConvGradFilter";
     } else {
-      convType = "GemmConv"
-      convGradInputType = "GemmConvGradInput"
-      convGradFilterType = "GemmConvGradFilter"
+      convType = "GemmConv";
+      convGradInputType = "GemmConvGradInput";
+      convGradFilterType = "GemmConvGradFilter";
     }
 
     if (FLAGS_use_nnpack) {
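The branch shown above routes to the depthwise kernels only on GPU, when the group count equals the channel count, and when the layer is not a deconvolution; everything else keeps the GEMM-based functions. A standalone sketch of that selection rule (illustrative, not the layer code itself):

    #include <cassert>
    #include <cstddef>
    #include <string>

    struct ConvFuncNames {
      std::string forward, gradInput, gradFilter;
    };

    // Mirrors the branch in ExpandConvLayer::init above: depthwise kernels are
    // chosen only for GPU, groups == channels, non-deconvolution layers.
    static ConvFuncNames selectConvFuncs(bool useGpu, std::size_t groups,
                                         std::size_t channels, bool isDeconv) {
      if (useGpu && groups == channels && !isDeconv) {
        return {"DepthwiseConv", "DepthwiseConvGradInput", "DepthwiseConvGradFilter"};
      }
      return {"GemmConv", "GemmConvGradInput", "GemmConvGradFilter"};
    }

    int main() {
      assert(selectConvFuncs(true, 32, 32, false).forward == "DepthwiseConv");
      assert(selectConvFuncs(false, 32, 32, false).forward == "GemmConv");  // CPU path
      assert(selectConvFuncs(true, 4, 32, false).forward == "GemmConv");    // grouped != depthwise
      return 0;
    }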
paddle/gserver/tests/test_LayerGrad.cpp
@@ -349,13 +349,13 @@ TEST(Layer, CosSimVecMatLayer) {
 void testDepthwiseConvLayer(const string& type, bool useGpu) {
   TestConfig config;
-  config.biasSize = 16;
+  config.biasSize = 32;
   config.layerConfig.set_type(type);
-  config.layerConfig.set_num_filters(16);
+  config.layerConfig.set_num_filters(32);
   config.layerConfig.set_partial_sum(1);
   config.layerConfig.set_shared_biases(true);
-  config.inputDefs.push_back({INPUT_DATA, "layer_0", 2048, 192 / 2});
+  config.inputDefs.push_back({INPUT_DATA, "layer_0", 2048, 192});
   LayerInputConfig* input = config.layerConfig.add_inputs();
   ConvConfig* conv = input->mutable_conv_conf();
   conv->set_filter_size(2);
@@ -388,8 +388,11 @@ void testDepthwiseConvLayer(const string& type, bool useGpu) {
 }
 
 TEST(Layer, depthwiseConvLayer) {
+  // 'depthwise_conv' is a sepecial case of 'exconv' whose
+  // groups size equals to the input channels size.
+  testDepthwiseConvLayer("exconv", /* useGpu= */ false);
 #ifndef PADDLE_ONLY_CPU
-  testDepthwiseConvLayer("depthwise_conv", /* useGpu= */ true);
+  testDepthwiseConvLayer("exconv", /* useGpu= */ true);
 #endif
 }
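As the new comment notes, depthwise convolution is treated as an 'exconv' whose groups equal its input channels, so no separate 'depthwise_conv' layer type is needed; the GPU dispatch in ExpandConvLayer above picks the depthwise kernels for that configuration. A standalone arithmetic sketch (sizes here are illustrative, not the ones used by this test) of why that special case is so much lighter in parameters:

    #include <cassert>
    #include <cstddef>

    // Weight count of a k x k grouped convolution (bias ignored): each output
    // channel convolves inCh/groups input channels.
    static std::size_t convParams(std::size_t inCh, std::size_t outCh,
                                  std::size_t groups, std::size_t k) {
      return outCh * (inCh / groups) * k * k;
    }

    int main() {
      // Dense 3x3 convolution, 32 -> 32 channels.
      assert(convParams(32, 32, 1, 3) == 9216);
      // Depthwise 3x3 over the same channels: one 3x3 filter per channel.
      assert(convParams(32, 32, 32, 3) == 288);
      return 0;
    }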