PaddlePaddle / PaddleDetection

Commit eeaccc83
Authored Sep 18, 2017 by Yu Yang

Merge branch 'develop' of github.com:baidu/Paddle into feature/transform_ctx

Parents: 87e4e25d, c4e783e5
Showing 5 changed files with 499 additions and 9 deletions (+499 −9)
paddle/gserver/layers/MKLDNNPoolLayer.cpp      +277  −0
paddle/gserver/layers/MKLDNNPoolLayer.h        +138  −0
paddle/gserver/tests/test_MKLDNN.cpp            +62  −0
python/paddle/trainer/config_parser.py          +13  −1
python/paddle/v2/framework/tests/op_test.py      +9  −8
paddle/gserver/layers/MKLDNNPoolLayer.cpp (new file, mode 0 → 100644)
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "MKLDNNPoolLayer.h"
#include "paddle/math/MathUtils.h"
#include "paddle/utils/Logging.h"

using namespace mkldnn;  // NOLINT
typedef memory::format format;

namespace paddle {

REGISTER_LAYER(mkldnn_pool, MKLDNNPoolLayer);

bool MKLDNNPoolLayer::init(const LayerMap& layerMap,
                           const ParameterMap& parameterMap) {
  if (!MKLDNNLayer::init(layerMap, parameterMap)) {
    return false;
  }

  /* the size of inputs for pool-layer is 1 */
  CHECK_EQ(config_.inputs_size(), 1);
  const PoolConfig& conf = config_.inputs(0).pool_conf();
  ic_ = conf.channels();
  ih_ = conf.img_size_y();
  iw_ = conf.img_size();
  oc_ = ic_;
  oh_ = conf.output_y();
  ow_ = conf.output_x();
  fh_ = conf.size_y();
  fw_ = conf.size_x();
  ph_ = conf.padding_y();
  pw_ = conf.padding();
  sh_ = conf.stride_y();
  sw_ = conf.stride();

  const std::string& type = conf.pool_type();
  if (type == "max-projection") {
    poolAlgo_ = algorithm::pooling_max;
  } else if (type == "avg-projection") {
    // paddle only use exclude_padding
    poolAlgo_ = algorithm::pooling_avg_exclude_padding;
  } else {
    LOG(FATAL) << "unknow pooling type!";
  }
  return true;
}

void MKLDNNPoolLayer::reshape(
    int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
  reshapeInput(bs, ih, iw);
  // ic_ and oc can not be changed
  CHECK_EQ(inputElemenCnt_ / bs / ih / iw, (size_t)ic)
      << "Input channel can not be changed";

  // cal output sizes
  // paddle used false caffeMode for pooling
  oh = outputSize(ih, fh_, ph_, sh_, false);
  ow = outputSize(iw, fw_, pw_, sw_, false);
  reshapeOutput(oh, ow);

  resizeOutput(bs, oc * oh * ow);

  printSizeInfo();
}

void MKLDNNPoolLayer::resetFwd(std::vector<primitive>& pipeline,
                               MKLDNNMatrixPtr& in,
                               MKLDNNMatrixPtr& wgt,
                               MKLDNNMatrixPtr& bias,
                               MKLDNNMatrixPtr& out) {
  resetFwdBuffers(in, out);

  resetFwdPD(fwdPD_, in, out);

  resetFwdPipeline(pipeline, fwdPD_, in, out);

  printValueFormatFlow();
}

void MKLDNNPoolLayer::resetBwd(std::vector<primitive>& pipeline,
                               MKLDNNMatrixPtr& in,
                               MKLDNNMatrixPtr& wgt,
                               MKLDNNMatrixPtr& bias,
                               MKLDNNMatrixPtr& out) {
  std::shared_ptr<pool_bwd::primitive_desc> pd;

  resetBwdBuffers(in, out);

  resetBwdPD(pd, in, out);

  resetBwdPipeline(pipeline, pd, in, out);

  printGradFormatFlow();
}

void MKLDNNPoolLayer::updateInputData() {
  inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
}

void MKLDNNPoolLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
                                      MKLDNNMatrixPtr& out) {
  resetInValue(in);

  resetOutValue(out);
}

void MKLDNNPoolLayer::resetInValue(MKLDNNMatrixPtr& in) {
  if (inputIsOnlyMKLDNN()) {
    const MatrixPtr& dnnIn = getInputValue(0);
    in = std::dynamic_pointer_cast<MKLDNNMatrix>(dnnIn);
    CHECK(in) << "Input should be MKLDNNMatrix";
  } else {
    CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
    const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
    in = MKLDNNMatrix::create(
        cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_);
  }
}

void MKLDNNPoolLayer::resetOutValue(MKLDNNMatrixPtr& out) {
  CHECK(inVal_) << "Should reset input value first";
  memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
  out = MKLDNNMatrix::create(
      output_.value, outDims, inVal_->getFormat(), engine_);
  output_.value = std::dynamic_pointer_cast<Matrix>(out);

  // create reorder if output value has cpu device and pd do not match
  cpuOutVal_ = nullptr;
  cvtOutVal_ = nullptr;
  if (!outputIsOnlyMKLDNN()) {
    const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value;
    cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_);
    if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
      cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_);
      CHECK(cvtOutVal_) << "should not be emptry";
    } else {
      // CPU output share the same data of MKLDNN output
      cpuOut->setData(out->getData());
      cpuOutVal_ = out;
    }
  }
}

void MKLDNNPoolLayer::resetFwdPD(std::shared_ptr<pool_fwd::primitive_desc>& pd,
                                 MKLDNNMatrixPtr in,
                                 MKLDNNMatrixPtr out) {
  memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
  memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
  memory::dims kernels = memory::dims{fh_, fw_};
  memory::dims strides = memory::dims{sh_, sw_};
  memory::dims padL = memory::dims{ph_, pw_};
  memory::dims padR = getPaddingR();
  padding_kind padKind = padding_kind::zero;
  prop_kind pk = passType_ == PASS_TEST ? prop_kind::forward_scoring
                                        : prop_kind::forward_training;
  auto fwdDesc = pool_fwd::desc(pk,
                                poolAlgo_,
                                in->getMemoryDesc(),
                                out->getMemoryDesc(),
                                strides,
                                kernels,
                                padL,
                                padR,
                                padKind);
  pd.reset(new pool_fwd::primitive_desc(fwdDesc, engine_));

  // prepare workspace if necessary
  workspace_ =
      (passType_ != PASS_TEST && poolAlgo_ == algorithm::pooling_max)
          ? std::make_shared<memory>(memory(pd->workspace_primitive_desc()))
          : nullptr;
}

void MKLDNNPoolLayer::resetFwdPipeline(
    std::vector<primitive>& pipeline,
    std::shared_ptr<pool_fwd::primitive_desc>& pd,
    MKLDNNMatrixPtr& in,
    MKLDNNMatrixPtr& out) {
  pipeline.clear();
  fwd_ = workspace_
             ? std::make_shared<pool_fwd>(pool_fwd(*pd, *in, *out, *workspace_))
             : std::make_shared<pool_fwd>(pool_fwd(*pd, *in, *out));
  pipeline.push_back(*fwd_);

  if (cvtOutVal_) {
    pipeline.push_back(*cvtOutVal_);
  }
}

void MKLDNNPoolLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
                                      MKLDNNMatrixPtr& out) {
  resetOutGrad(out);

  resetInGrad(in);
}

void MKLDNNPoolLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
  CHECK(outVal_) << "Should have output value";
  out = MKLDNNMatrix::create(output_.grad, outVal_->getPrimitiveDesc());

  // create reorder if output value has cpu device and pd do not match
  cpuOutGrad_ = nullptr;
  cvtOutGrad_ = nullptr;
  if (!outputIsOnlyMKLDNN()) {
    const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad;
    cpuOutGrad_ = MKLDNNMatrix::create(
        cpuOut, memory::dims{bs_, oc_, oh_, ow_}, format::nchw, engine_);
    if (cpuOutGrad_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
      cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out);
      CHECK(cvtOutGrad_) << "should not be emptry";
    } else {
      // share the same data of CPU output
      output_.grad->setData(cpuOut->getData());
      out = cpuOutGrad_;
    }
  }
}

void MKLDNNPoolLayer::resetInGrad(MKLDNNMatrixPtr& in) {
  in = nullptr;
  const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
  if (inGrad == nullptr) {
    return;
  }
  CHECK(inVal_);
  in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
}

void MKLDNNPoolLayer::resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
                                 MKLDNNMatrixPtr& in,
                                 MKLDNNMatrixPtr& out) {
  memory::dims kernels = memory::dims{fh_, fw_};
  memory::dims strides = memory::dims{sh_, sw_};
  memory::dims padL = memory::dims{ph_, pw_};
  memory::dims padR = getPaddingR();
  CHECK(in);
  CHECK(out);
  auto bwdDesc = pool_bwd::desc(poolAlgo_,
                                in->getMemoryDesc(),
                                out->getMemoryDesc(),
                                strides,
                                kernels,
                                padL,
                                padR,
                                padding_kind::zero);
  pd.reset(new pool_bwd::primitive_desc(bwdDesc, engine_, *fwdPD_));
}

void MKLDNNPoolLayer::resetBwdPipeline(
    std::vector<primitive>& pipeline,
    std::shared_ptr<pool_bwd::primitive_desc>& pd,
    MKLDNNMatrixPtr& in,
    MKLDNNMatrixPtr& out) {
  pipeline.clear();
  if (cvtOutGrad_) {
    pipeline.push_back(*cvtOutGrad_);
  }

  bwdData_ =
      workspace_
          ? std::make_shared<pool_bwd>(pool_bwd(*pd, *out, *workspace_, *in))
          : std::make_shared<pool_bwd>(pool_bwd(*pd, *out, *in));
  pipeline.push_back(*bwdData_);
}

}  // namespace paddle
paddle/gserver/layers/MKLDNNPoolLayer.h (new file, mode 0 → 100644)
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "MKLDNNLayer.h"
#include "mkldnn.hpp"

namespace paddle {
typedef mkldnn::pooling_forward pool_fwd;
typedef mkldnn::pooling_backward pool_bwd;

/**
 * @brief A subclass of MKLDNNLayer pool layer.
 *
 * The config file api is mkldnn_pool
 */
class MKLDNNPoolLayer : public MKLDNNLayer {
protected:
  // padding height and width
  int ph_, pw_;
  // stride height and width
  int sh_, sw_;
  // filter(kenerl) height and width
  int fh_, fw_;

  // pooling_avg or pooling_max
  mkldnn::algorithm poolAlgo_;

  // MKLDNNMatrixPtr which should be created from CPU Device
  MKLDNNMatrixPtr cpuOutVal_;
  MKLDNNMatrixPtr cpuOutGrad_;
  // convert handle between CPU device and MKLDNN device
  std::shared_ptr<mkldnn::reorder> cvtOutVal_;
  std::shared_ptr<mkldnn::reorder> cvtOutGrad_;

  // save forward primitive_desc, which can be used backward
  std::shared_ptr<pool_fwd::primitive_desc> fwdPD_;

  // according to https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
  // test_pooling_forward.cpp, pool need workspace for backward
  std::shared_ptr<mkldnn::memory> workspace_;

public:
  explicit MKLDNNPoolLayer(const LayerConfig& config) : MKLDNNLayer(config) {}

  ~MKLDNNPoolLayer() {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void reshape(
      int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;

  void resetFwd(std::vector<mkldnn::primitive>& pipeline,
                MKLDNNMatrixPtr& in,
                MKLDNNMatrixPtr& wgt,
                MKLDNNMatrixPtr& bias,
                MKLDNNMatrixPtr& out) override;

  void resetBwd(std::vector<mkldnn::primitive>& pipeline,
                MKLDNNMatrixPtr& in,
                MKLDNNMatrixPtr& wgt,
                MKLDNNMatrixPtr& bias,
                MKLDNNMatrixPtr& out) override;

  void updateInputData() override;

  void printSizeInfo() override {
    MKLDNNLayer::printSizeInfo();
    VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_
                       << ": ph: " << ph_ << ", pw: " << pw_
                       << ", sh: " << sh_ << ", sw: " << sw_;
  }

protected:
  /**
   * Forward functions: reset buffers(input, output),
   *                    reset primitive descriptor,
   *                    reset pipeline.
   */
  void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
  void resetInValue(MKLDNNMatrixPtr& in);
  void resetOutValue(MKLDNNMatrixPtr& out);
  void resetFwdPD(std::shared_ptr<pool_fwd::primitive_desc>& pd,
                  MKLDNNMatrixPtr in,
                  MKLDNNMatrixPtr out);
  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
                        std::shared_ptr<pool_fwd::primitive_desc>& pd,
                        MKLDNNMatrixPtr& in,
                        MKLDNNMatrixPtr& out);

  /**
   * Backward functions: reset buffers(input, output),
   *                     reset primitive descriptor,
   *                     reset pipeline.
   */
  void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
  void resetOutGrad(MKLDNNMatrixPtr& out);
  void resetInGrad(MKLDNNMatrixPtr& in);
  void resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
                  MKLDNNMatrixPtr& in,
                  MKLDNNMatrixPtr& out);
  void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
                        std::shared_ptr<pool_bwd::primitive_desc>& pd,
                        MKLDNNMatrixPtr& in,
                        MKLDNNMatrixPtr& out);

  /**
   * get padding_r according to
   * https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
   * test_pooling_forward.cpp
   */
  mkldnn::memory::dims getPaddingR() const {
    mkldnn::memory::dims padR = {ph_, pw_};
    for (int i = 0; i < 2; ++i) {
      if ((ih_ + ph_ + padR[0] - fh_) / sh_ + 1 < oh_) {
        ++padR[0];
      }
      if ((iw_ + pw_ + padR[1] - fw_) / sw_ + 1 < ow_) {
        ++padR[1];
      }
    }
    return padR;
  }
};

}  // namespace paddle
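The interplay between reshape() in MKLDNNPoolLayer.cpp, which derives the output height and width with outputSize(..., /*caffeMode=*/false), and getPaddingR() above, which grows the right/bottom padding until MKL-DNN's floor-based size rule reaches that same output size, is easy to check numerically. The sketch below is a minimal restatement, not Paddle code: it assumes outputSize with caffeMode=false follows the usual ceil-style formula from paddle/math/MathUtils.h, and it reuses one of the configurations exercised in test_MKLDNN.cpp below.

# Hedged sketch: ceil-style output size assumed for outputSize(..., caffeMode=False),
# floor-style size assumed for MKL-DNN pooling with asymmetric left/right padding.

def ceil_output_size(in_size, filter_size, pad, stride):
    # assumed equivalent of outputSize(in, f, p, s, /*caffeMode=*/false)
    return (in_size - filter_size + 2 * pad + stride - 1) // stride + 1

def mkldnn_output_size(in_size, filter_size, pad_l, pad_r, stride):
    # floor-based rule applied when left and right padding are given explicitly
    return (in_size + pad_l + pad_r - filter_size) // stride + 1

def padding_r(in_size, filter_size, pad, stride, out_size):
    # mirrors MKLDNNPoolLayer::getPaddingR(): grow the right/bottom padding
    # until the floor-based size catches up with the ceil-based one
    pad_r = pad
    for _ in range(2):
        if mkldnn_output_size(in_size, filter_size, pad, pad_r, stride) < out_size:
            pad_r += 1
    return pad_r

# One case from test_MKLDNN.cpp: ih=56, fh=3, ph=1, sh=2 -> oh=29
oh = ceil_output_size(56, 3, 1, 2)            # 29
pr = padding_r(56, 3, 1, 2, oh)               # 2, one larger than ph
assert mkldnn_output_size(56, 3, 1, pr, 2) == oh == 29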
paddle/gserver/tests/test_MKLDNN.cpp
...
@@ -141,6 +141,68 @@ TEST(MKLDNNLayer, ConvLayer) {
  testConvLayer({4, 4, 16, 3, 3, 16, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1});
}

struct testPoolDesc {
  int bs, ch;  // input channel and output channel are the same
  int ih, iw;
  int oh, ow;
  int fh, fw;
  int ph, pw;
  int sh, sw;
};

void testPoolLayer(const testPoolDesc& pm) {
  const std::string compareTypes[] = {"mkldnn_pool", "pool"};
  TestConfig cfg;
  cfg.layerConfig.set_type(compareTypes[0]);
  cfg.layerConfig.set_size(pm.ch * pm.oh * pm.ow);
  cfg.inputDefs.push_back(
      {INPUT_DATA,
       "layer_0",
       /* size of input layer= */ size_t(pm.ch * pm.ih * pm.iw),
       0});
  LayerInputConfig* input = cfg.layerConfig.add_inputs();
  PoolConfig* pool = input->mutable_pool_conf();
  // pool->set_pool_type(poolType);
  pool->set_channels(pm.ch);
  pool->set_img_size(pm.iw);
  pool->set_img_size_y(pm.ih);
  pool->set_output_x(pm.ow);
  pool->set_output_y(pm.oh);
  pool->set_size_x(pm.fw);
  pool->set_size_y(pm.fh);
  pool->set_padding(pm.pw);
  pool->set_padding_y(pm.ph);
  pool->set_stride(pm.sw);
  pool->set_stride_y(pm.sh);

  int oh = outputSize(pm.ih, pm.fh, pm.ph, pm.sh, false);
  int ow = outputSize(pm.iw, pm.fw, pm.pw, pm.sw, false);
  CHECK_EQ(ow, pm.ow) << "output size check failed";
  CHECK_EQ(oh, pm.oh) << "output size check failed";

  MKLDNNTester tester;
  for (auto type : {"max-projection", "avg-projection"}) {
    pool->set_pool_type(type);
    TestConfig ref = cfg;
    ref.layerConfig.set_type(compareTypes[1]);
    for (auto bs : {pm.bs, 1}) {
      tester.run(cfg, ref, bs, pm.ih, pm.iw);
    }
  }
}

TEST(MkldnnLayer, PoolLayer) {
  /* bs, ch, ih, iw, oh, ow, fh, fw, ph, pw, sh, sw */
  testPoolLayer({2, 1, 4, 4, 2, 2, 3, 3, 0, 0, 2, 2});
  testPoolLayer({10, 8, 16, 16, 8, 8, 2, 2, 0, 0, 2, 2});
  testPoolLayer({4, 2, 5, 5, 3, 3, 3, 3, 1, 1, 2, 2});
  testPoolLayer({8, 16, 56, 56, 28, 28, 3, 3, 0, 0, 2, 2});
  testPoolLayer({8, 16, 14, 14, 7, 7, 3, 3, 0, 0, 2, 2});
  testPoolLayer({4, 16, 7, 7, 1, 1, 7, 7, 0, 0, 1, 1});
  testPoolLayer({4, 2, 5, 5, 3, 3, 5, 5, 1, 1, 1, 1});
  testPoolLayer({2, 8, 56, 56, 29, 29, 3, 3, 1, 1, 2, 2});
}

// TODO(TJ): add branch test

int main(int argc, char** argv) {
...
python/paddle/trainer/config_parser.py
...
@@ -2286,8 +2286,15 @@ class NormLayer(LayerBase):
@config_layer('pool')
class PoolLayer(LayerBase):
    layer_type = 'pool'

    def __init__(self, name, inputs, ceil_mode=True, **xargs):
        super(PoolLayer, self).__init__(name, 'pool', 0, inputs=inputs, **xargs)
        use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0))
        if self.layer_type == "mkldnn_pool":
            config_assert(use_mkldnn, "mkldnn_pool only support MKLDNN")
        self.layer_type = 'mkldnn_pool' if use_mkldnn else 'pool'
        super(PoolLayer, self).__init__(
            name, self.layer_type, 0, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            pool_conf = self.config.inputs[input_index].pool_conf
...
@@ -2297,6 +2304,11 @@ class PoolLayer(LayerBase):
                pool_conf.channels)


@config_layer('mkldnn_pool')
class MKLDNNPoolLayer(PoolLayer):
    layer_type = 'mkldnn_pool'


@config_layer('pool3d')
class Pool3DLayer(LayerBase):
    def __init__(self, name, inputs, ceil_mode=True, **xargs):
...
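The net effect of the config_parser.py hunks is that a layer declared as 'pool' is promoted to 'mkldnn_pool' whenever the trainer is started with use_mkldnn, while an explicit 'mkldnn_pool' requires use_mkldnn to be set. A minimal standalone restatement of that selection logic (the plain dict and assert stand in for g_command_config_args and config_assert; this is not the real config_parser environment):

def select_pool_layer_type(declared_type, command_config_args):
    # mirrors the new PoolLayer.__init__ logic: mkldnn_pool requires use_mkldnn,
    # and use_mkldnn upgrades a plain pool to mkldnn_pool
    use_mkldnn = int(command_config_args.get("use_mkldnn", 0))
    if declared_type == "mkldnn_pool":
        assert use_mkldnn, "mkldnn_pool only support MKLDNN"
    return "mkldnn_pool" if use_mkldnn else "pool"

assert select_pool_layer_type("pool", {}) == "pool"
assert select_pool_layer_type("pool", {"use_mkldnn": 1}) == "mkldnn_pool"
assert select_pool_layer_type("mkldnn_pool", {"use_mkldnn": 1}) == "mkldnn_pool"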
python/paddle/v2/framework/tests/op_test.py
...
@@ -192,6 +192,9 @@ class OpTest(unittest.TestCase):
        self.op.run(self.scope, ctx)

        for out_name, out_dup in Operator.get_op_outputs(self.op.type()):
            if out_name not in self.outputs:
                continue
            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
...
@@ -206,14 +209,12 @@ class OpTest(unittest.TestCase):
                        actual, expect, atol=1e-05),
                    "output name: " + out_name + " has diff")
            else:
                var = self.scope.find_var(out_name)
                if var is not None:
                    actual = np.array(var.get_tensor())
                    expect = self.outputs[out_name]
                    self.assertTrue(
                        np.allclose(actual, expect, atol=1e-05),
                        "output name: " + out_name + " has diff")
                actual = np.array(self.scope.find_var(out_name).get_tensor())
                expect = self.outputs[out_name]
                self.assertTrue(
                    np.allclose(actual, expect, atol=1e-05),
                    "output name: " + out_name + " has diff")

    def check_output(self):
        places = [core.CPUPlace()]
...
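The op_test.py hunks rework how OpTest compares the operator's computed outputs against the expected arrays in self.outputs: each declared output is fetched from the scope and checked with np.allclose at atol=1e-05, and output names the test does not declare are skipped via the `if out_name not in self.outputs: continue` guard. A small self-contained sketch of that checking pattern; check_outputs, fetch, and the toy scope below are hypothetical stand-ins, not the OpTest API:

import numpy as np

def check_outputs(op_output_names, expected, fetch, atol=1e-05):
    # compare each declared output against the tensor fetched from the scope;
    # names missing from `expected` are skipped instead of raising
    for name in op_output_names:
        if name not in expected:
            continue
        actual = np.array(fetch(name))
        assert np.allclose(actual, expected[name], atol=atol), \
            "output name: " + name + " has diff"

# toy usage with an in-memory "scope"
scope = {"Out": np.array([1.0, 2.0]), "Aux": np.array([0.0])}
check_outputs(["Out", "Aux"], {"Out": np.array([1.0, 2.0])}, scope.__getitem__)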