BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 9f643322
Authored on Jul 05, 2017 by yi.wu

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_golang_precommit

Parents: b68e90be, f21e3f73
Showing 27 changed files with 211 additions and 100 deletions.
cmake/generic.cmake                                    +8   -5
go/cmd/master/CMakeLists.txt                           +1   -1
go/pserver/client/c/CMakeLists.txt                     +1   -1
paddle/api/CMakeLists.txt                              +1   -0
paddle/gserver/layers/AverageLayer.h                   +4   -0
paddle/gserver/layers/CrossChannelNormLayer.cpp        +26  -11
paddle/gserver/layers/MaxLayer.h                       +4   -0
paddle/gserver/layers/NormLayer.cpp                    +0   -10
paddle/gserver/layers/SequenceLastInstanceLayer.cpp    +4   -6
paddle/gserver/layers/SequencePoolLayer.cpp            +2   -3
paddle/gserver/layers/SequencePoolLayer.h              +3   -4
paddle/gserver/tests/LayerGradUtil.cpp                 +3   -1
paddle/gserver/tests/LayerGradUtil.h                   +4   -0
paddle/gserver/tests/test_LayerGrad.cpp                +13  -3
paddle/parameter/Argument.cpp                          +3   -3
paddle/parameter/Argument.h                            +1   -1
paddle/parameter/tests/test_argument.cpp               +2   -2
paddle/pserver/LightNetwork.cpp                        +14  -14
paddle/pserver/SocketChannel.cpp                       +11  -11
paddle/pserver/test/SocketTest.cpp                     +14  -14
paddle/scripts/docker/build.sh                         +1   -1
paddle/trainer/Tester.cpp                              +1   -1
paddle/utils/ThreadLocal.h                             +6   -6
python/paddle/trainer/config_parser.py                 +8   -0
python/paddle/trainer_config_helpers/layers.py         +17  -2
python/paddle/trainer_config_helpers/tests/configs/protostr/test_sequence_pooling.protostr   +51  -0
python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py                  +8   -0
cmake/generic.cmake

@@ -192,9 +192,9 @@ function(cc_test TARGET_NAME)
     set(multiValueArgs SRCS DEPS)
     cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
     add_executable(${TARGET_NAME} ${cc_test_SRCS})
-    target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} gtest gtest_main)
+    target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} gtest gtest_main -lstdc++ -lm)
     add_dependencies(${TARGET_NAME} ${cc_test_DEPS} gtest gtest_main)
-    add_test(${TARGET_NAME} ${TARGET_NAME})
+    add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME}
+             WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
   endif()
 endfunction(cc_test)

@@ -281,10 +281,11 @@ function(go_library TARGET_NAME)
   file(GLOB GO_SOURCE RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.go")
   string(REPLACE "${PADDLE_GO_PATH}/" "" CMAKE_CURRENT_SOURCE_REL_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+  # FIXME: link path
   add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
     COMMAND rm "${${TARGET_NAME}_LIB_PATH}"
     # Golang build source code
-    COMMAND env GOPATH=${GOPATH} ${CMAKE_Go_COMPILER} build ${BUILD_MODE}
+    COMMAND env LIBRARY_PATH=${CMAKE_BINARY_DIR}/go/pserver/client/c/:$ENV{LIBRARY_PATH}
+    GOPATH=${GOPATH} ${CMAKE_Go_COMPILER} build ${BUILD_MODE}
     -o "${${TARGET_NAME}_LIB_PATH}"
     "./${CMAKE_CURRENT_SOURCE_REL_DIR}/${GO_SOURCE}"
     # must run under GOPATH

@@ -299,8 +300,10 @@ function(go_binary TARGET_NAME)
   cmake_parse_arguments(go_binary "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
   string(REPLACE "${PADDLE_GO_PATH}/" "" CMAKE_CURRENT_SOURCE_REL_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+  # FIXME: link path
   add_custom_command(OUTPUT ${TARGET_NAME}_timestamp
-    COMMAND env GOPATH=${GOPATH} ${CMAKE_Go_COMPILER} build
+    COMMAND env LIBRARY_PATH=${CMAKE_BINARY_DIR}/go/pserver/client/c/:$ENV{LIBRARY_PATH}
+    GOPATH=${GOPATH} ${CMAKE_Go_COMPILER} build
     -o "${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}"
     "./${CMAKE_CURRENT_SOURCE_REL_DIR}/${go_binary_SRCS}"
     WORKING_DIRECTORY "${PADDLE_IN_GOPATH}/go")
go/cmd/master/CMakeLists.txt

@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-go_binary(master SRC master.go)
+go_binary(master SRC master.go DEPS paddle_go_optimizer)
go/pserver/client/c/CMakeLists.txt

(+1 -1; the diff body is not expanded on the source page)
paddle/api/CMakeLists.txt

@@ -66,6 +66,7 @@ SWIG_LINK_LIBRARIES(swig_paddle
     paddle_trainer_lib
     paddle_network
     paddle_parameter
+    paddle_optimizer
     paddle_math
     paddle_utils
     paddle_proto
paddle/gserver/layers/AverageLayer.h

@@ -25,6 +25,10 @@ namespace paddle {
  * If SequenceLevel = kNonSeq:
  *   Output: output size is the number of input sequences (NOT input instances)
  *   output[i] = average_{for each instance in this sequence}{input[i]}
+ * If stride_ > 0:
+ *   Output: a shorten sequence. Stride is the step size by which we slide a
+ *           window upon the input sequence, and the average pooling
+ *           operation is then applied to each interval independently.
  * If SequenceLevel = kSeq:
  *   Check input sequence must has sub-sequence
  *   Output: output size is the number of input sub-sequences
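
The comment lines added above describe the stride pooling mode shared by the sequence pooling layers in this diff: the input sequence is cut into consecutive windows of length stride_, and each window is pooled independently, yielding a shorter sequence. A rough NumPy sketch of that semantics (illustrative only, not code from this commit):

    import numpy as np

    def pool_with_stride(seq, stride, op=np.mean):
        # Slide a window of length `stride` over the sequence and pool
        # each interval independently, producing a shorter sequence.
        return np.array([op(seq[i:i + stride], axis=0)
                         for i in range(0, len(seq), stride)])

    seq = np.arange(12.0).reshape(12, 1)        # one sequence of 12 instances
    print(pool_with_stride(seq, 5))             # 3 outputs: windows of 5, 5, 2
    print(pool_with_stride(seq, 5, op=np.max))  # max pooling variant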
paddle/gserver/layers/CrossChannelNormLayer.cpp

@@ -36,6 +36,16 @@ MatrixPtr CrossChannelNormLayer::createSpatialMatrix(MatrixPtr data,
       data->getData() + iter * spatialDim, 1, spatialDim, false, useGpu_);
 }

+bool CrossChannelNormLayer::init(const LayerMap& layerMap,
+                                 const ParameterMap& parameterMap) {
+  Layer::init(layerMap, parameterMap);
+  CHECK(parameters_[0]);
+  const NormConfig& conf = config_.inputs(0).norm_conf();
+  channels_ = conf.channels();
+  scale_.reset(new Weight(channels_, 1, parameters_[0]));
+  return true;
+}
+
 void CrossChannelNormLayer::forward(PassType passType) {
   Layer::forward(passType);
   MatrixPtr inV = getInputValue(0);

@@ -51,9 +61,7 @@ void CrossChannelNormLayer::forward(PassType passType) {
   Matrix::resizeOrCreate(dataBuffer_, batchSize, dataDim, false, useGpu_);
   Matrix::resizeOrCreate(spatialBuffer_, 1, spatialDim, false, useGpu_);
   Matrix::resizeOrCreate(normBuffer_, batchSize, spatialDim, false, useGpu_);
-  normBuffer_->zeroMem();
-  // add eps to avoid overflow
-  normBuffer_->addScalar(*normBuffer_, 1e-6);
+
   inV->square2(*dataBuffer_);
   for (size_t i = 0; i < batchSize; i++) {
     const MatrixPtr inVTmp = createSampleMatrix(inV, i, spatialDim);

@@ -63,6 +71,8 @@ void CrossChannelNormLayer::forward(PassType passType) {
     // compute norm.
     spatialBuffer_->sumCols(*dataTmp, 1, 0);
+    // add eps to avoid overflow
+    spatialBuffer_->add(1e-6);
     spatialBuffer_->sqrt2(*spatialBuffer_);
     normTmp->copyFrom(*spatialBuffer_);
     outVTmp->copyFrom(*inVTmp);

@@ -82,6 +92,9 @@ void CrossChannelNormLayer::backward(const UpdateCallback& callback) {
   size_t dataDim = inG->getWidth();
   size_t spatialDim = dataDim / channels_;

+  MatrixPtr inGBuffer;
+  Matrix::resizeOrCreate(inGBuffer, channels_, spatialDim, false, useGpu_);
+
   dataBuffer_->dotMul(*outG, *outV);
   Matrix::resizeOrCreate(scaleDiff_, channels_, 1, false, useGpu_);
   Matrix::resizeOrCreate(channelBuffer_, channels_, 1, false, useGpu_);

@@ -100,22 +113,24 @@ void CrossChannelNormLayer::backward(const UpdateCallback& callback) {
     scaleDiff_->add(*channelBuffer_, 1.);

     sampleBuffer_->dotMul(*inVTmp, *outGTmp);
-    spatialBuffer_->sumCols(*sampleBuffer_, 1., 1.);
+    spatialBuffer_->sumCols(*sampleBuffer_, 1., 0.);
     // scale the grad
-    inGTmp->copyFrom(*inVTmp);
-    inGTmp->mulRowVector(*spatialBuffer_);
+    inGBuffer->copyFrom(*inVTmp);
+    inGBuffer->mulRowVector(*spatialBuffer_);
     // divide by square of norm
     spatialBuffer_->dotMul(*normTmp, *normTmp);
-    inGTmp->divRowVector(*spatialBuffer_);
+    inGBuffer->divRowVector(*spatialBuffer_);
     // subtract
-    inGTmp->add(*outGTmp, -1, 1);
+    inGBuffer->add(*outGTmp, -1, 1);
     // divide by norm
-    inGTmp->divRowVector(*normTmp);
+    inGBuffer->divRowVector(*normTmp);
     // scale the diff
-    inGTmp->mulColVector(*scale_->getW());
+    inGBuffer->mulColVector(*scale_->getW());
+
+    inGTmp->add(*inGBuffer);
   }
   // updata scale
-  if (scale_->getWGrad()) scale_->getWGrad()->copyFrom(*scaleDiff_);
+  if (scale_->getWGrad()) scale_->getWGrad()->add(*scaleDiff_);
   scale_->getParameterPtr()->incUpdate(callback);
 }
paddle/gserver/layers/MaxLayer.h

@@ -26,6 +26,10 @@ namespace paddle {
  * If SequenceLevel = kNonSeq:
  *   Output: output size is the number of input sequences (NOT input instances)
  *   output[i] = max_{for each instance in this sequence}{input[i]}
+ * If stride_ > 0:
+ *   Output: a shorten sequence. Stride is the step size by which we slide a
+ *           window upon the input sequence, and the max pooling operation is
+ *           then applied to each interval independently.
  * If SequenceLevel = kSeq:
  *   Check input sequence must has sub-sequence
  *   Output: output size is the number of input sub-sequences
paddle/gserver/layers/NormLayer.cpp

@@ -56,14 +56,4 @@ bool ResponseNormLayer::init(const LayerMap& layerMap,
   return true;
 }

-bool CrossChannelNormLayer::init(const LayerMap& layerMap,
-                                 const ParameterMap& parameterMap) {
-  Layer::init(layerMap, parameterMap);
-  CHECK(parameters_[0]);
-  const NormConfig& conf = config_.inputs(0).norm_conf();
-  channels_ = conf.channels();
-  scale_.reset(new Weight(channels_, 1, parameters_[0]));
-  return true;
-}
-
 }  // namespace paddle
paddle/gserver/layers/SequenceLastInstanceLayer.cpp

@@ -26,10 +26,9 @@ namespace paddle {
  * If SequenceLevel = kNonseq:
  *   Output: a sequence containing only the last instance of the input sequence
  * If stride_ > 0:
- *   Output: a shorten sequence. The operation of getting last instance of a
- *           sequence is independently performed on every slice of the input
- *           sequence, which is obtained by sliding a window with the window
- *           size set to stride_.
+ *   Output: a shorten sequence. Stride is the step size by which we slide a
+ *           window upon the input sequence, and getting last instance
+ *           operation is then applied to each interval independently.
  * If SequenceLevel = kSeq:
  *   Check input sequence must has sub-sequence
  *   Output: a sequence containing only the last instance of each sub-sequence

@@ -73,8 +72,7 @@ bool SequenceLastInstanceLayer::init(const LayerMap& layerMap,
 void SequenceLastInstanceLayer::forward(PassType passType) {
   SequencePoolLayer::forward(passType);

-  auto starts = (stride_ > 0) ? stridePositions_->getData()
-                              : startPositions_->getData(false);
+  auto starts = startPositions_->getData(false);
   MatrixPtr inputValue = getInputValue(0);
   MatrixPtr outputValue = getOutputValue();
paddle/gserver/layers/SequencePoolLayer.cpp

@@ -72,9 +72,8 @@ void SequencePoolLayer::forward(PassType passType) {
   if (stride_ > 0) {
     CHECK_EQ(input.hasSubseq(), 0UL)
         << "sequence stride pooling is invalid for hasSubseq now";
-    output_.poolSequenceWithStride(
-        input, stride_, &stridePositions_, reversed_);
-    newBatchSize_ = stridePositions_->getSize() - 1;
+    output_.poolSequenceWithStride(input, stride_, &startPositions_, reversed_);
+    newBatchSize_ = startPositions_->getSize() - 1;
   }
   resetOutput(newBatchSize_, dim);
paddle/gserver/layers/SequencePoolLayer.h

@@ -28,8 +28,9 @@ namespace paddle {
  *     sequence}{input[i]}
  * If stride_ > 0:
  *   Check input sequence must not have sub-sequence
- *   Output: a shorten sequence, pooling is performed upon a small local
- *           area
+ *   Output: a shorten sequence. Stride is the step size by which we slide
+ *           a window upon the input sequence, and the pooling operation
+ *           is then applied to each interval independently.
 * If SequenceLevel = kSeq:
 *   Check input sequence must has sub-sequence
 *   Output: output size is the number of input sub-sequences

@@ -47,8 +48,6 @@ protected:
   size_t newBatchSize_;
   ICpuGpuVectorPtr startPositions_;
   int stride_;
-  // Store the start position of each window.
-  IVectorPtr stridePositions_;
   // Whether the input sequence is reversed or not.
   bool reversed_ = false;
paddle/gserver/tests/LayerGradUtil.cpp

@@ -465,7 +465,6 @@ void initTestLayer(TestConfig testConf,
                            ParameterConfig paraConfig) {
     paraConfig.set_name(paraName);
     paraConfig.set_size(paraSize);
-    paraConfig.set_initial_std(1);
     paraConfig.set_is_static(isStatic);
     auto para =
         std::make_shared<Parameter>(paraConfig, FLAGS_use_gpu, initialize);

@@ -499,6 +498,9 @@ void initTestLayer(TestConfig testConf,
       paraConfig.add_dims((*layerMap)[input.input_layer_name()]->getSize());
       paraConfig.add_dims(testConf.layerConfig.size());
     }
+    CHECK_GE(testConf.paramInitialStd, 0);
+    paraConfig.set_initial_mean(testConf.paramInitialMean);
+    paraConfig.set_initial_std(testConf.paramInitialStd);
     initParameter(paraName, paraSize, inputDef.isStatic, false, paraConfig);
   }
 }
paddle/gserver/tests/LayerGradUtil.h

@@ -125,12 +125,16 @@ struct TestConfig {
   LayerConfig layerConfig;
   std::vector<InputDef> inputDefs;
   size_t biasSize;
+  real paramInitialMean;
+  real paramInitialStd;
   bool testAccumulate;
   bool testState;
   bool staticBias;
   bool testBatchState;
   TestConfig()
       : biasSize(0),
+        paramInitialMean(0.0),
+        paramInitialStd(1.0),
         testAccumulate(true),
         testState(false),
         staticBias(false),
paddle/gserver/tests/test_LayerGrad.cpp

@@ -845,6 +845,10 @@ void testDegradeLayer(bool hasSubseq,
 TEST(Layer, MaxLayer) {
   testDegradeLayer(false, "max", "non-seq", -1);  // seq max to non-seq
+  testDegradeLayer(false,
+                   "max",
+                   "non-seq",
+                   5);  // seq max to a shorten seq, stride window = 5
   testDegradeLayer(true, "max", "non-seq", -1);  // hasSubseq max to non-seq
   testDegradeLayer(true, "max", "seq", -1);      // hasSubseq max to seq
 }

@@ -868,6 +872,10 @@ TEST(Layer, SequenceLastInstanceLayer) {
 TEST(Layer, AverageLayer) {
   testDegradeLayer(false, "average", "non-seq", -1);  // seq average to non-seq
+  testDegradeLayer(false,
+                   "average",
+                   "non-seq",
+                   5);  // seq average to a shorten seq, stride window = 5
   testDegradeLayer(
       true, "average", "non-seq", -1);  // hasSubseq average to non-seq
   testDegradeLayer(true, "average", "seq", -1);  // hasSubseq average to seq

@@ -1661,6 +1669,8 @@ TEST(Layer, PadLayer) {
 TEST(Layer, CrossChannelNormLayer) {
   TestConfig config;
+  config.paramInitialMean = 1.;
+  config.paramInitialStd = 0.;
   config.layerConfig.set_type("norm");
   config.layerConfig.set_size(100);
   LayerInputConfig* input = config.layerConfig.add_inputs();

@@ -1674,7 +1684,7 @@ TEST(Layer, CrossChannelNormLayer) {
   config.inputDefs.push_back({INPUT_DATA, "layer_0", 100, 10});
   for (auto useGpu : {false, true}) {
-    testLayerGrad(config, "cross-channel-norm", 10, false, useGpu, false, 5);
+    testLayerGrad(config, "cross-channel-norm", 10, false, useGpu, false);
   }
 }
paddle/parameter/Argument.cpp

@@ -561,7 +561,7 @@ void Argument::degradeSequence(const Argument& input) {
 void Argument::poolSequenceWithStride(const Argument& input,
                                       size_t stride,
-                                      IVectorPtr* stridePostions,
+                                      ICpuGpuVectorPtr* stridePostions,
                                       bool reversed) {
   // If input.sequenceStartPositions = [0, 9, 14, 17, 30] and stride = 5,
   // then sequenceStartPositions = [0, 2, 3, 4, 7].

@@ -598,8 +598,8 @@ void Argument::poolSequenceWithStride(const Argument& input,
   stridePos.emplace_back(starts[numSequences]);
   int size = stridePos.size();
   CHECK_EQ(size - 1, tgtBuf[numSequences]);
-  IVector::resizeOrCreate(*stridePostions, size, false);
-  (*stridePostions)->copyFrom(stridePos.data(), size);
+  ICpuGpuVector::resizeOrCreate(*stridePostions, size, false);
+  (*stridePostions)->getMutableVector(false)->copyFrom(stridePos.data(), size);
 }

 void Argument::getValueString(
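
To make the bookkeeping above concrete: with input.sequenceStartPositions = [0, 9, 14, 17, 30] and stride = 5, each sequence contributes ceil(length / stride) windows (2, 1, 1, 3), so the degraded start positions become [0, 2, 3, 4, 7], while the per-window start offsets are [0, 5, 9, 14, 17, 22, 27, 30] (or [0, 4, 9, 14, 17, 20, 25, 30] when reversed, the values checked in test_argument.cpp below). A hypothetical Python sketch of that window computation (not code from this commit):

    def pool_starts(seq_starts, stride, reverse=False):
        # Cut each sequence [b, e) into ceil((e - b) / stride) windows;
        # when `reverse`, windows are laid out from the end of the sequence.
        positions = []
        for b, e in zip(seq_starts, seq_starts[1:]):
            if reverse:
                positions.extend(max(s, b) for s in
                                 sorted(range(e - stride, b - stride, -stride)))
            else:
                positions.extend(range(b, e, stride))
        positions.append(seq_starts[-1])
        return positions

    starts = [0, 9, 14, 17, 30]
    print(pool_starts(starts, 5))                # [0, 5, 9, 14, 17, 22, 27, 30]
    print(pool_starts(starts, 5, reverse=True))  # [0, 4, 9, 14, 17, 20, 25, 30]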
paddle/parameter/Argument.h

@@ -299,7 +299,7 @@ struct Argument {
    */
   void poolSequenceWithStride(const Argument& input,
                               size_t stride,
-                              IVectorPtr* stridePositions,
+                              ICpuGpuVectorPtr* stridePositions,
                               bool reversed = false);

   /**
    * @brief getValueString will return the argument's output in string. There
paddle/parameter/tests/test_argument.cpp

@@ -31,7 +31,7 @@ TEST(Argument, poolSequenceWithStride) {
   int strideResultReversed[] = {0, 4, 9, 14, 17, 20, 25, 30};

   for (auto reversed : {false, true}) {
-    IVectorPtr stridePositions;
+    ICpuGpuVectorPtr stridePositions;
     output.poolSequenceWithStride(
         input, 5 /* stride */, &stridePositions, reversed);

@@ -45,7 +45,7 @@ TEST(Argument, poolSequenceWithStride) {
     CHECK_EQ(stridePositions->getSize(), 8UL);
     auto result = reversed ? strideResultReversed : strideResult;
     for (int i = 0; i < 8; i++) {
-      CHECK_EQ(stridePositions->getData()[i], result[i]);
+      CHECK_EQ(stridePositions->getData(false)[i], result[i]);
     }
   }
 }
paddle/pserver/LightNetwork.cpp

@@ -142,7 +142,7 @@ SocketServer::SocketServer(const std::string &addr, int port, int rdmaCpu)
   }

   /// trigger to initialize RDMA lib
-  PCHECK(RdmaClientDaemons::get()) << "initilizate RDMA failed\n";
+  CHECK(RdmaClientDaemons::get()) << "initilizate RDMA failed\n";
 }

 SocketServer::~SocketServer() {

@@ -168,7 +168,7 @@ void SocketServer::tcpServer() {
   /// First call to socket() function
   socket_ = socket(AF_INET, SOCK_STREAM, 0);
-  PCHECK(socket_ >= 0) << "ERROR opening socket";
+  CHECK(socket_ >= 0) << "ERROR opening socket";

   /// Initialize socket structure
   bzero((char *)&serv_addr, sizeof(serv_addr));

@@ -176,7 +176,7 @@ void SocketServer::tcpServer() {
   serv_addr.sin_port = htons(port_);
   if (!addr_.empty()) {
     server = gethostbyname(addr_.c_str());
-    PCHECK(server) << "ERROR, no such host: " << addr_;
+    CHECK(server) << "ERROR, no such host: " << addr_;
     bcopy((char *)server->h_addr,
           (char *)&serv_addr.sin_addr.s_addr,
           server->h_length);

@@ -187,7 +187,7 @@ void SocketServer::tcpServer() {
   setOption(socket_);

   /// Now bind the host address using bind() call.
-  PCHECK(bind(socket_, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) >= 0)
+  CHECK(bind(socket_, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) >= 0)
       << "ERROR on binding " << addr_;

   /// Now start listening for the clients, here process will

@@ -201,7 +201,7 @@ void SocketServer::tcpServer() {
     if (stopping_) {
       break;
     }
-    PCHECK(newsockfd >= 0) << "ERROR on accept";
+    CHECK(newsockfd >= 0) << "ERROR on accept";
     constexpr int kPeerNameLen = 128;
     char peerName[kPeerNameLen];
     CHECK(inet_ntop(AF_INET, &cli_addr.sin_addr, peerName, kPeerNameLen));

@@ -227,14 +227,14 @@ void SocketServer::rdmaServer() {
   /// First call to socket() function
   rdmaSocket_ = rdma::ssocket(rdmaCpu_);
-  PCHECK(rdmaSocket_) << "ERROR opening RDMA socket";
+  CHECK(rdmaSocket_) << "ERROR opening RDMA socket";

-  PCHECK(rdma::bind(rdmaSocket_, rdmaUri_.c_str()) == 0)
+  CHECK(rdma::bind(rdmaSocket_, rdmaUri_.c_str()) == 0)
       << "ERROR bind RDMA socket";

   /// Now start listening for the clients, here process will
   /// go in sleep mode and will wait for the incoming connection
-  PCHECK(rdma::listen(rdmaSocket_) == 0) << "ERROR listen RDMA socket";
+  CHECK(rdma::listen(rdmaSocket_) == 0) << "ERROR listen RDMA socket";

   while (true) {
     /// Accept actual connection from the client

@@ -242,7 +242,7 @@ void SocketServer::rdmaServer() {
     if (stopping_) {
      break;
     }
-    PCHECK(newsock) << "ERROR on accept";
+    CHECK(newsock) << "ERROR on accept";

     constexpr int kPeerNameLen = 128;
     char peerName[kPeerNameLen];

@@ -290,7 +290,7 @@ RdmaClientDaemons::RdmaClientDaemons() {
   onlineCpus_ = rdma::numCpus();
   for (auto i = 0; i < onlineCpus_; i++) {
     socket = rdma::csocket(i);
-    PCHECK(socket) << "ERROR open client socket daemon";
+    CHECK(socket) << "ERROR open client socket daemon";

     rdmaClientSocket_.push_back(socket);
   }

@@ -355,7 +355,7 @@ void SocketClient::TcpClient(const std::string &serverAddr, int serverPort) {
   /// Create a socket point
   int sockfd = socket(AF_INET, SOCK_STREAM, 0);
-  PCHECK(sockfd >= 0) << "ERROR opening socket";
+  CHECK(sockfd >= 0) << "ERROR opening socket";

 #if defined(__OSX__) || defined(__APPLE__)
   server = getipnodebyname(serverAddr.c_str(), AF_INET, AI_DEFAULT, &errRet);

@@ -396,7 +396,7 @@ void SocketClient::TcpClient(const std::string &serverAddr, int serverPort) {
       }
       std::this_thread::sleep_for(std::chrono::seconds(1));
     } else {
-      PCHECK(errno != 0) << "ERROR connecting to " << serverAddr << ":"
+      CHECK(errno != 0) << "ERROR connecting to " << serverAddr << ":"
                         << serverPort << "errorno: " << errno;
     }
   } while (errno == ECONNREFUSED);

@@ -426,7 +426,7 @@ void SocketClient::RdmaClient(const std::string &serverAddr, int serverPort) {
   /// connect to server with socket daemon
   sock = rdma::connect(socketDaemon_, rdmaUri.c_str());
-  PCHECK(sock) << "ERROR connect to server" << rdmaUri;
+  CHECK(sock) << "ERROR connect to server" << rdmaUri;

   std::vector<std::string> seg;
   str::split(rdmaUri, '/', &seg);
paddle/pserver/SocketChannel.cpp

@@ -51,7 +51,7 @@ size_t SocketChannel::read(void* buf, size_t size) {
     else
       len = rdma::read(rdmaSocket_, (char*)buf + total, size - total);
-    PCHECK(len >= 0) << " peer=" << peerName_;
+    CHECK(len >= 0) << " peer=" << peerName_;
     if (len <= 0) {
       return total;
     }

@@ -69,7 +69,7 @@ size_t SocketChannel::write(const void* buf, size_t size) {
     else
       len = rdma::write(rdmaSocket_, (char*)buf + total, size - total);
-    PCHECK(len >= 0) << " peer=" << peerName_;
+    CHECK(len >= 0) << " peer=" << peerName_;
     if (len <= 0) {
       return total;
     }

@@ -98,7 +98,7 @@ static size_t readwritev(IOFunc iofunc,
   while (size < total) {
     ssize_t len =
         iofunc(socket, &iovs[curIov], std::min(iovcnt - curIov, maxiovs));
-    PCHECK(len > 0) << " peer=" << peerName << " curIov=" << curIov
+    CHECK(len > 0) << " peer=" << peerName << " curIov=" << curIov
                    << " iovCnt=" << iovcnt
                    << " iovs[curIov].base=" << iovs[curIov].iov_base
                    << " iovs[curIov].iov_len=" << iovs[curIov].iov_len;

@@ -183,7 +183,7 @@ void SocketChannel::writeMessage(const std::vector<struct iovec>& userIovs) {
     header.totalLength += iov.iov_len;
   }

-  PCHECK(writev(iovs) == (size_t)header.totalLength);
+  CHECK(writev(iovs) == (size_t)header.totalLength);
 }

 std::unique_ptr<MsgReader> SocketChannel::readMessage() {

@@ -194,7 +194,7 @@ std::unique_ptr<MsgReader> SocketChannel::readMessage() {
     return nullptr;
   }

-  PCHECK(len == sizeof(header));
+  CHECK(len == sizeof(header));

   std::unique_ptr<MsgReader> msgReader(new MsgReader(this, header.numIovs));

@@ -209,7 +209,7 @@ std::unique_ptr<MsgReader> SocketChannel::readMessage() {
 MsgReader::MsgReader(SocketChannel* channel, size_t numBlocks)
     : channel_(channel), blockLengths_(numBlocks), currentBlockIndex_(0) {
   size_t size = numBlocks * sizeof(blockLengths_[0]);
-  PCHECK(channel_->read(&blockLengths_[0], size) == size);
+  CHECK(channel_->read(&blockLengths_[0], size) == size);
 }

 void MsgReader::readBlocks(const std::vector<void*>& bufs) {

@@ -223,12 +223,12 @@ void MsgReader::readBlocks(const std::vector<void*>& bufs) {
     ++currentBlockIndex_;
   }

-  PCHECK(channel_->readv(&iovs) == totalLength);
+  CHECK(channel_->readv(&iovs) == totalLength);
 }

 void MsgReader::readNextBlock(void* buf) {
   CHECK_LT(currentBlockIndex_, blockLengths_.size());
-  PCHECK(channel_->read(buf, getNextBlockLength()) == getNextBlockLength());
+  CHECK(channel_->read(buf, getNextBlockLength()) == getNextBlockLength());
   ++currentBlockIndex_;
 }
paddle/pserver/test/SocketTest.cpp

@@ -113,7 +113,7 @@ void SocketServer::run() {
   /* First call to socket() function */
   socket_ = socket(AF_INET, SOCK_STREAM, 0);
-  PCHECK(socket_ >= 0) << "ERROR opening socket";
+  CHECK(socket_ >= 0) << "ERROR opening socket";

   /* Initialize socket structure */
   bzero((char*)&serv_addr, sizeof(serv_addr));

@@ -122,7 +122,7 @@ void SocketServer::run() {
   serv_addr.sin_port = htons(port_);

   /* Now bind the host address using bind() call.*/
-  PCHECK(bind(socket_, (struct sockaddr*)&serv_addr, sizeof(serv_addr)) >= 0)
+  CHECK(bind(socket_, (struct sockaddr*)&serv_addr, sizeof(serv_addr)) >= 0)
       << "ERROR on binding";

   /* Now start listening for the clients, here process will

@@ -134,7 +134,7 @@ void SocketServer::run() {
   while (true) {
     /* Accept actual connection from the client */
     newsockfd = accept(socket_, (struct sockaddr*)&cli_addr, &clilen);
-    PCHECK(newsockfd >= 0) << "ERROR on accept";
+    CHECK(newsockfd >= 0) << "ERROR on accept";

     SocketWorker* worker = new SocketWorker(newsockfd);
     worker->start();

@@ -146,17 +146,17 @@ void SocketWorker::run() {
   while (true) {
     int64_t n = channel_.readAll(&header, sizeof(header));
-    PCHECK(n == sizeof(header)) << "ERROR reading from socket";
+    CHECK(n == sizeof(header)) << "ERROR reading from socket";

     buffer_.resize(header.dataLength);
     n = channel_.readAll(&buffer_[0], header.dataLength);
-    PCHECK(n == header.dataLength) << "ERROR reading from socket";
+    CHECK(n == header.dataLength) << "ERROR reading from socket";

     /* Write a response to the client */
     n = channel_.writeAll(&header, sizeof(header));
-    PCHECK(n == sizeof(header)) << "ERROR reading from socket";
+    CHECK(n == sizeof(header)) << "ERROR reading from socket";
     n = channel_.writeAll(buffer_.data(), buffer_.size());
-    PCHECK(n == header.dataLength) << "ERROR writing to socket";
+    CHECK(n == header.dataLength) << "ERROR writing to socket";
   }
 }

@@ -177,9 +177,9 @@ SocketClient::SocketClient(const std::string& serverAddr, int serverPort) {
   /* Create a socket point */
   int sockfd = socket(AF_INET, SOCK_STREAM, 0);
-  PCHECK(sockfd >= 0) << "ERROR opening socket";
+  CHECK(sockfd >= 0) << "ERROR opening socket";

   server = gethostbyname(serverAddr.c_str());
-  PCHECK(server) << "ERROR, no such host: " << serverAddr;
+  CHECK(server) << "ERROR, no such host: " << serverAddr;

   bzero((char*)&serv_addr, sizeof(serv_addr));
   serv_addr.sin_family = AF_INET;

@@ -189,7 +189,7 @@ SocketClient::SocketClient(const std::string& serverAddr, int serverPort) {
   serv_addr.sin_port = htons(serverPort);

   /* Now connect to the server */
-  PCHECK(connect(sockfd, (sockaddr*)&serv_addr, sizeof(serv_addr)) >= 0)
+  CHECK(connect(sockfd, (sockaddr*)&serv_addr, sizeof(serv_addr)) >= 0)
      << "ERROR connecting";

   channel_.reset(new SocketChannel(sockfd));

@@ -234,18 +234,18 @@ int main(int argc, char** argv) {
     cpuGrad.copyFrom(gpuGrad);

     header.dataLength = dataSize;
-    PCHECK(channel->writeAll(&header, sizeof(header)) == sizeof(header))
+    CHECK(channel->writeAll(&header, sizeof(header)) == sizeof(header))
         << "Client write header error";

-    PCHECK(channel->writeAll(cpuGrad.getData(), dataSize) == dataSize)
+    CHECK(channel->writeAll(cpuGrad.getData(), dataSize) == dataSize)
        << "Client write data error";

     /* Now read server response */
-    PCHECK(channel->readAll(&header, sizeof(header)) == sizeof(header))
+    CHECK(channel->readAll(&header, sizeof(header)) == sizeof(header))
        << "Client read header error";

     CHECK_EQ((uint64_t)header.dataLength, dataSize);

-    PCHECK(channel->readAll(cpuParam.getData(), dataSize) == dataSize)
+    CHECK(channel->readAll(cpuParam.getData(), dataSize) == dataSize)
        << "Client read data error";

     gpuParam.copyFrom(cpuParam);
paddle/scripts/docker/build.sh

@@ -78,7 +78,7 @@ paddle version
 # PaddlePaddle. This awkwardness is due to
 # https://github.com/PaddlePaddle/Paddle/issues/1854. It also
 # describes a solution.
-if [ ${WITH_DOC} == "ON" ]; then
+if [[ ${WITH_DOC} == "ON" ]]; then
 cat <<EOF
 ========================================
 Building documentation ...
paddle/trainer/Tester.cpp

@@ -175,7 +175,7 @@ real Tester::forwardOneBatch(const DataBatch& dataBatch,
   }
   hl_stream_synchronize(HPPL_STREAM_DEFAULT);
   FILE* fp = fopen(featFile.c_str(), "ab+");
-  PCHECK(!ferror(fp)) << "Fail to open " << featFile;
+  CHECK(!ferror(fp)) << "Fail to open " << featFile;

   size_t sampleNum = featMatrices[0]->getHeight();
   for (size_t i = 0; i < sampleNum; ++i) {
paddle/utils/ThreadLocal.h

@@ -51,7 +51,7 @@ template <class T>
 class ThreadLocal {
 public:
   ThreadLocal() {
-    PCHECK(pthread_key_create(&threadSpecificKey_, dataDestructor) == 0);
+    CHECK(pthread_key_create(&threadSpecificKey_, dataDestructor) == 0);
   }
   ~ThreadLocal() { pthread_key_delete(threadSpecificKey_); }

@@ -65,7 +65,7 @@ public:
     if (!p && createLocal) {
       p = new T();
       int ret = pthread_setspecific(threadSpecificKey_, p);
-      PCHECK(ret == 0);
+      CHECK(ret == 0);
     }
     return p;
   }

@@ -79,7 +79,7 @@ public:
     if (T* q = get(false)) {
       dataDestructor(q);
     }
-    PCHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+    CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
   }

   /**

@@ -112,7 +112,7 @@ private:
 template <class T>
 class ThreadLocalD {
 public:
-  ThreadLocalD() { PCHECK(pthread_key_create(&threadSpecificKey_, NULL) == 0); }
+  ThreadLocalD() { CHECK(pthread_key_create(&threadSpecificKey_, NULL) == 0); }
   ~ThreadLocalD() {
     pthread_key_delete(threadSpecificKey_);
     for (auto t : threadMap_) {

@@ -127,7 +127,7 @@ public:
     T* p = (T*)pthread_getspecific(threadSpecificKey_);
     if (!p) {
       p = new T();
-      PCHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+      CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
       updateMap(p);
     }
     return p;

@@ -141,7 +141,7 @@ public:
     if (T* q = (T*)pthread_getspecific(threadSpecificKey_)) {
       dataDestructor(q);
     }
-    PCHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+    CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
     updateMap(p);
   }
python/paddle/trainer/config_parser.py

@@ -2466,10 +2466,14 @@ class MaxLayer(LayerBase):
                  trans_type='non-seq',
                  bias=False,
                  output_max_index=None,
+                 stride=-1,
                  **xargs):
         super(MaxLayer, self).__init__(name, 'max', 0, inputs=inputs, **xargs)
         config_assert(len(self.inputs) == 1, 'MaxLayer must have 1 input')
+        if trans_type == 'seq':
+            config_assert(stride == -1, 'subseq does not support stride window')
         self.config.trans_type = trans_type
+        self.config.seq_pool_stride = stride
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             self.set_layer_size(input_layer.size)

@@ -2731,11 +2735,15 @@ class AverageLayer(LayerBase):
                  average_strategy='average',
                  trans_type='non-seq',
                  bias=False,
+                 stride=-1,
                  **xargs):
         super(AverageLayer, self).__init__(
             name, 'average', 0, inputs=inputs, **xargs)
         self.config.average_strategy = average_strategy
+        if trans_type == 'seq':
+            config_assert(stride == -1, 'subseq does not support stride window')
         self.config.trans_type = trans_type
+        self.config.seq_pool_stride = stride
         config_assert(len(inputs) == 1, 'AverageLayer must have 1 input')
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
python/paddle/trainer_config_helpers/layers.py

@@ -1246,10 +1246,19 @@ def pooling_layer(input,
                   name=None,
                   bias_attr=None,
                   agg_level=AggregateLevel.TO_NO_SEQUENCE,
+                  stride=-1,
                   layer_attr=None):
     """
     Pooling layer for sequence inputs, not used for Image.

+    If stride > 0, this layer slides a window whose size is determined by stride,
+    and return the pooling value of the window as the output. Thus, a long sequence
+    will be shorten.
+
+    The parameter stride specifies the intervals at which to apply the pooling
+    operation. Note that for sequence with sub-sequence, the default value
+    of stride is -1.
+
     The example usage is:

     .. code-block:: python

@@ -1268,6 +1277,8 @@ def pooling_layer(input,
     :param pooling_type: Type of pooling, MaxPooling(default), AvgPooling,
                          SumPooling, SquareRootNPooling.
     :type pooling_type: BasePoolingType|None
+    :param stride: The step size between successive pooling regions.
+    :type stride: Int
     :param bias_attr: Bias parameter attribute. False if no bias.
     :type bias_attr: ParameterAttribute|None|False
     :param layer_attr: The Extra Attributes for layer, such as dropout.

@@ -1285,12 +1296,16 @@ def pooling_layer(input,
         extra_dict['output_max_index'] = pooling_type.output_max_index
     extra_dict.update(ExtraLayerAttribute.to_kwargs(layer_attr))

+    if agg_level == AggregateLevel.TO_SEQUENCE:
+        assert stride == -1
+
     Layer(
         name=name,
         type=pooling_type.name,
         inputs=[Input(input.name)],
         bias=ParamAttr.to_bias(bias_attr),
         trans_type=agg_level,
+        stride=stride,
         **extra_dict)

     return LayerOutput(

@@ -1552,7 +1567,7 @@ def last_seq(input,
     :type name: basestring
     :param input: Input layer name.
     :type input: LayerOutput
-    :param stride: window size.
+    :param stride: The step size between successive pooling regions.
     :type stride: Int
     :param layer_attr: extra layer attributes.
     :type layer_attr: ExtraLayerAttribute.

@@ -1608,7 +1623,7 @@ def first_seq(input,
     :type name: basestring
     :param input: Input layer name.
     :type input: LayerOutput
-    :param stride: window size.
+    :param stride: The step size between successive pooling regions.
     :type stride: Int
     :param layer_attr: extra layer attributes.
     :type layer_attr: ExtraLayerAttribute.
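
The stride argument documented above is exercised by the test config at the end of this diff; a minimal usage sketch (layer name and size are illustrative, mirroring the test's dat_in input):

    from paddle.trainer_config_helpers import *

    din = data_layer(name='dat_in', size=100)

    # Default: pool each whole sequence down to a single vector.
    whole = pooling_layer(input=din, pooling_type=MaxPooling())

    # stride=5: pool consecutive windows of 5 instances independently,
    # so a long sequence is shortened rather than fully collapsed.
    windowed = pooling_layer(
        input=din,
        agg_level=AggregateLevel.TO_NO_SEQUENCE,
        pooling_type=MaxPooling(),
        stride=5)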
python/paddle/trainer_config_helpers/tests/configs/protostr/test_sequence_pooling.protostr

@@ -14,6 +14,7 @@ layers {
     input_layer_name: "dat_in"
   }
   trans_type: "seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__seq_pooling_1__"

@@ -24,6 +25,7 @@ layers {
     input_layer_name: "dat_in"
   }
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__seq_pooling_2__"

@@ -35,6 +37,7 @@ layers {
   }
   average_strategy: "average"
   trans_type: "seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__seq_pooling_3__"

@@ -46,6 +49,7 @@ layers {
   }
   average_strategy: "average"
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__seq_pooling_4__"

@@ -57,6 +61,7 @@ layers {
   }
   average_strategy: "sum"
   trans_type: "seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__seq_pooling_5__"

@@ -68,6 +73,7 @@ layers {
   }
   average_strategy: "sum"
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 layers {
   name: "__seq_pooling_6__"

@@ -77,8 +83,44 @@ layers {
   inputs {
     input_layer_name: "dat_in"
   }
+  trans_type: "non-seq"
+  seq_pool_stride: 5
+}
+layers {
+  name: "__seq_pooling_7__"
+  type: "average"
+  size: 100
+  active_type: ""
+  inputs {
+    input_layer_name: "dat_in"
+  }
+  average_strategy: "average"
+  trans_type: "non-seq"
+  seq_pool_stride: 5
+}
+layers {
+  name: "__seq_pooling_8__"
+  type: "average"
+  size: 100
+  active_type: ""
+  inputs {
+    input_layer_name: "dat_in"
+  }
+  average_strategy: "sum"
+  trans_type: "non-seq"
+  seq_pool_stride: 5
+}
+layers {
+  name: "__seq_pooling_9__"
+  type: "max"
+  size: 100
+  active_type: ""
+  inputs {
+    input_layer_name: "dat_in"
+  }
   output_max_index: true
   trans_type: "non-seq"
+  seq_pool_stride: -1
 }
 input_layer_names: "dat_in"
 output_layer_names: "__seq_pooling_0__"

@@ -88,6 +130,9 @@ output_layer_names: "__seq_pooling_3__"
 output_layer_names: "__seq_pooling_4__"
 output_layer_names: "__seq_pooling_5__"
 output_layer_names: "__seq_pooling_6__"
+output_layer_names: "__seq_pooling_7__"
+output_layer_names: "__seq_pooling_8__"
+output_layer_names: "__seq_pooling_9__"
 sub_models {
   name: "root"
   layer_names: "dat_in"

@@ -98,6 +143,9 @@ sub_models {
   layer_names: "__seq_pooling_4__"
   layer_names: "__seq_pooling_5__"
   layer_names: "__seq_pooling_6__"
+  layer_names: "__seq_pooling_7__"
+  layer_names: "__seq_pooling_8__"
+  layer_names: "__seq_pooling_9__"
   input_layer_names: "dat_in"
   output_layer_names: "__seq_pooling_0__"
   output_layer_names: "__seq_pooling_1__"

@@ -106,6 +154,9 @@ sub_models {
   output_layer_names: "__seq_pooling_4__"
   output_layer_names: "__seq_pooling_5__"
   output_layer_names: "__seq_pooling_6__"
+  output_layer_names: "__seq_pooling_7__"
+  output_layer_names: "__seq_pooling_8__"
+  output_layer_names: "__seq_pooling_9__"
   is_recurrent_layer_group: false
 }
python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py

@@ -14,6 +14,14 @@ for pt in POOL_TYPE:
     for al in AGG_LEVEL:
         opts.append(pooling_layer(input=din, agg_level=al, pooling_type=pt()))

+for pt in POOL_TYPE:
+    opts.append(
+        pooling_layer(
+            input=din,
+            agg_level=AggregateLevel.TO_NO_SEQUENCE,
+            pooling_type=pt(),
+            stride=5))
+
 opts.append(
     pooling_layer(
         input=din, pooling_type=MaxPooling(output_max_index=True)))