Commit 56a722a1 (PaddlePaddle / Paddle)
Authored on July 10, 2017 by caoying03
output all beam search results in layer group.
Parent: 82801f24
Showing 5 changed files, with 102 additions and 59 deletions (+102 -59):
paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp  +72 -32
paddle/gserver/gradientmachines/RecurrentGradientMachine.h    +6 -1
paddle/parameter/Argument.cpp                                 +20 -16
paddle/parameter/Argument.h                                   +1 -0
python/paddle/trainer_config_helpers/networks.py              +3 -10
paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
@@ -1012,11 +1012,6 @@ void RecurrentGradientMachine::generateSequence() {
                            /* width */ resultNum,
                            false,
                            /* useGpu */ false);
-    Matrix::resizeOrCreate(generator_.outArg.value,
-                           /* height */ maxGenWordCount,
-                           /* width */ 1,
-                           false,
-                           /* useGpu */ false);
   }
   ICpuGpuVector::resizeOrCreate(generator_.outArg.sequenceStartPositions,
                                 numSequences + 1,
@@ -1026,7 +1021,7 @@ void RecurrentGradientMachine::generateSequence() {
   } else {
     oneWaySearch(numSequences);
   }
-  if (dataArgsSize_) createDataOutlink(batchMachineIdVec_);
+  if (dataArgsSize_) createDataOutlink();
 
   size_t size = generator_.ids.size();
   generator_.outArg.ids->resize(size);
@@ -1106,6 +1101,7 @@ void RecurrentGradientMachine::oneWaySearch(size_t batchSize) {
   }
 
   batchMachineIdVec_.clear();
+  batchMachineStartPos_.clear();
   int* starts = generator_.outArg.sequenceStartPositions->getMutableData(false);
   starts[0] = 0;
   generator_.ids.clear();
@@ -1312,13 +1308,20 @@ void RecurrentGradientMachine::fillGenOutputs() {
     finalPaths_[i].resize(minFinalPathsSize);
   }
 
-  batchMachineIdVec_.clear();
   generator_.ids.clear();
   int* starts = generator_.outArg.sequenceStartPositions->getMutableData(false);
   starts[0] = 0;
   if (numResults > 1) {
-    real* probs = generator_.outArg.in->getData();
+    int idsProbSaveSize = 0;
+    for (auto inSeq : finalPaths_) {
+      for (auto path : inSeq) idsProbSaveSize += path.ids.size();
+      idsProbSaveSize += inSeq.size();
+    }
+    Matrix::resizeOrCreate(
+        generator_.outArg.value, idsProbSaveSize, 1, false, false);
     real* idsProb = generator_.outArg.value->getData();
+
+    real* probs = generator_.outArg.in->getData();
     size_t curPos = 0;
     for (size_t i = 0; i < finalPaths_.size(); ++i) {
       for (size_t j = 0; j < finalPaths_[i].size(); ++j) {
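The block added above sizes generator_.outArg.value up front: every kept path contributes one slot per generated word, plus one slot for the -1.0 separator score written after it. A minimal standalone sketch of the same counting rule, with invented toy data standing in for finalPaths_:

#include <cstdio>
#include <vector>

int main() {
  // Made-up stand-in for finalPaths_: for each input sequence, the word
  // ids of every path kept by beam search (beam size 2 here).
  std::vector<std::vector<std::vector<int>>> finalPaths = {
      {{3, 7, 5}, {3, 9}},        // input 0 keeps two paths
      {{4, 4, 8, 2}, {4, 6, 1}},  // input 1 keeps two paths
  };

  // Same counting rule as the hunk above: one slot per generated word,
  // plus one slot per path for the -1.0 separator score.
  int idsProbSaveSize = 0;
  for (auto& inSeq : finalPaths) {
    for (auto& pathIds : inSeq) idsProbSaveSize += pathIds.size();
    idsProbSaveSize += inSeq.size();
  }

  std::printf("%d\n", idsProbSaveSize);  // (3 + 2) + (4 + 3) + 2 + 2 = 16
  return 0;
}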
@@ -1333,24 +1336,16 @@ void RecurrentGradientMachine::fillGenOutputs() {
         curPos += genLen;
         idsProb[curPos++] = -1.0;
         probs[i * numResults + j] = path.logProb;
-
-        if (!j && dataArgsSize_) {
-          // in beam search, here only reserved the top 1 generated result
-          // for out_links that are not the generated word indices.
-          batchMachineIdVec_.insert(batchMachineIdVec_.end(),
-                                    path.machineIdVec.begin(),
-                                    path.machineIdVec.end());
-        }
       }
       starts[i + 1] = generator_.ids.size();
     }
   } else {
     for (size_t i = 0; i < finalPaths_.size(); ++i) {
       CHECK(!finalPaths_[i].empty());
-      generator_.ids.insert(generator_.ids.begin(),
-                            finalPaths_[i][0].ids.begin(),
-                            finalPaths_[i][0].ids.end());
-      starts[i + 1] = starts[i] + finalPaths_[i][0].ids.size();
+      Path& path = finalPaths_[i][0];
+      generator_.ids.insert(
+          generator_.ids.begin(), path.ids.begin(), path.ids.end());
+      starts[i + 1] = starts[i] + path.ids.size();
     }
   }
 }
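With the top-1-only branch removed, the multi-result path writes every kept beam into generator_.ids, and starts[i + 1] marks where the beams of input i end. A minimal sketch of the resulting layout, using a simplified stand-in for Path and made-up ids:

#include <cstdio>
#include <vector>

// Simplified stand-in for RecurrentGradientMachine::Path.
struct Path {
  std::vector<int> ids;  // generated word ids
  float logProb;         // score of the whole path
};

int main() {
  // Invented beams: two input sequences, two kept paths each.
  std::vector<std::vector<Path>> finalPaths = {
      {{{3, 7, 5}, -0.2f}, {{3, 9}, -1.1f}},
      {{{4, 4, 8, 2}, -0.5f}, {{4, 6, 1}, -0.9f}},
  };

  std::vector<int> ids;           // flattened ids over all beams
  std::vector<int> starts = {0};  // per-input sequence start positions
  for (auto& inSeq : finalPaths) {
    for (auto& path : inSeq)
      ids.insert(ids.end(), path.ids.begin(), path.ids.end());
    // As in the hunk: the boundary is recorded once per input sequence,
    // after all of its beams have been appended.
    starts.push_back(static_cast<int>(ids.size()));
  }

  // starts == {0, 5, 12}: ids[starts[i]..starts[i+1]) holds every beam
  // of input i, back to back.
  for (size_t i = 0; i + 1 < starts.size(); ++i)
    std::printf("input %zu: ids[%d..%d)\n", i, starts[i], starts[i + 1]);
  return 0;
}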
@@ -1364,25 +1359,70 @@ void RecurrentGradientMachine::copyDataOutlinkFrame(size_t machineCur) {
   }
 }
 
-void RecurrentGradientMachine::createDataOutlink(
-    std::vector<int>& machineIdVec) {
-  size_t seqNum =
-      getBeamSize() > 1UL ? finalPaths_.size() : finalPaths_[0].size();
-  std::vector<int> starts(seqNum + 1, 0);
-  for (size_t i = 0; i < seqNum; ++i) {
-    size_t seqLen = getBeamSize() > 1UL ? finalPaths_[i][0].ids.size()
-                                        : finalPaths_[0][i].ids.size();
-    starts[i + 1] = starts[i] + seqLen;
+void RecurrentGradientMachine::createDataOutlinkSelRowsInfo(
+    bool isSeq, std::vector<Argument>& outArgs) {
+  batchMachineIdVec_.clear();
+
+  size_t seqIdx = 0;
+  for (size_t i = 0; i < finalPaths_.size(); ++i) {
+    for (size_t j = 0; j < finalPaths_[i].size(); ++j) {
+      std::vector<int>& machineIdVec = finalPaths_[i][j].machineIdVec;
+      if (isSeq) {
+        for (size_t i = 0; i < machineIdVec.size(); ++i) {
+          size_t rowId = machineIdVec[i];
+          int* seqPos =
+              outArgs[i].sequenceStartPositions->getMutableData(false);
+          batchMachineIdVec_.push_back(seqPos[rowId]);
+        }
+      } else {
+        batchMachineIdVec_.insert(
+            batchMachineIdVec_.end(), machineIdVec.begin(), machineIdVec.end());
+      }
+      seqIdx++;
+    }
+  }
+}
+
+void RecurrentGradientMachine::createDataOutlinkCopySizeInfo(
+    bool isSeq, std::vector<Argument>& outArgs, std::vector<int>& copySize) {
+  size_t totalSeqNum = std::accumulate(
+      finalPaths_.begin(),
+      finalPaths_.end(),
+      0UL,
+      [](size_t a, const std::vector<Path>& b) { return a + b.size(); });
+  copySize.resize(totalSeqNum, 1);
+  batchMachineStartPos_.resize(totalSeqNum + 1, 0);
+  if (isSeq) {
+    ICpuGpuVectorPtr inputSeqStartPos = outArgs[0].sequenceStartPositions;
+    CHECK_EQ(inputSeqStartPos->getSize() - 1, finalPaths_.size());
+    int* starts = inputSeqStartPos->getMutableData(false);
+    int seqId = 0;
+    for (int i = 0; i < finalPaths_.size(); ++i) {
+      for (int j = 0; j < finalPaths_[i].size(); ++j) {
+        copySize[seqId] = starts[i + 1] - starts[i];
+        batchMachineStartPos_[seqId + 1] =
+            batchMachineStartPos_[seqId] + finalPaths_[i][j].ids.size();
+        seqId++;
+      }
+    }
   }
+}
 
+void RecurrentGradientMachine::createDataOutlink() {
   for (size_t i = 0; i < dataArgsSize_; i++) {
+    bool isSeq = dataArgsFrame_[i][0].hasSeq();
+    std::vector<int> copySize;
+    createDataOutlinkCopySizeInfo(isSeq, dataArgsFrame_[i], copySize);
+    createDataOutlinkSelRowsInfo(isSeq, dataArgsFrame_[i]);
+
     dataArgs_[i].concat(dataArgsFrame_[i],
-                        machineIdVec,
-                        starts,
+                        batchMachineIdVec_,
+                        batchMachineStartPos_,
+                        copySize,
                         useGpu_,
                         HPPL_STREAM_1,
                         PASS_TEST);
     auto dataAgent =
         dynamic_cast<DataLayer*>(outFrameLines_[i + 1].agentLayer.get());
     CHECK_NOTNULL(dataAgent);
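createDataOutlinkCopySizeInfo now sizes its bookkeeping from the total number of kept paths rather than the number of input sequences. The sketch below reproduces that accumulation and the batchMachineStartPos_ fill with hypothetical path lengths; it is an illustration, not Paddle code:

#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // Hypothetical ids.size() of every kept path, grouped by input
  // sequence, standing in for finalPaths_.
  std::vector<std::vector<int>> pathLens = {{3, 2}, {4, 3}};

  // One bookkeeping slot per kept path, mirroring the std::accumulate
  // call in createDataOutlinkCopySizeInfo.
  size_t totalSeqNum = std::accumulate(
      pathLens.begin(), pathLens.end(), 0UL,
      [](size_t a, const std::vector<int>& b) { return a + b.size(); });

  // Cumulative word counts over all paths, one entry per path boundary.
  std::vector<int> batchMachineStartPos(totalSeqNum + 1, 0);
  size_t seqId = 0;
  for (auto& inSeq : pathLens)
    for (int len : inSeq) {
      batchMachineStartPos[seqId + 1] = batchMachineStartPos[seqId] + len;
      ++seqId;
    }

  for (int pos : batchMachineStartPos) std::printf("%d ", pos);
  std::printf("\n");  // prints: 0 3 5 9 12
  return 0;
}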
paddle/gserver/gradientmachines/RecurrentGradientMachine.h
@@ -480,7 +480,11 @@ private:
    * @param machineIdVec : select a row of output matrix in each frame
    *             that the generation process expanded.
    */
-  void createDataOutlink(std::vector<int>& machineIdVec);
+  void createDataOutlink();
+  void createDataOutlinkCopySizeInfo(bool isSeq,
+                                     std::vector<Argument>& outArgs,
+                                     std::vector<int>& copySize);
+  void createDataOutlinkSelRowsInfo(bool isSeq, std::vector<Argument>& outArgs);
 
   /*
    * @brief used in beam search, connect previous frame to form recurrent link
@@ -543,6 +547,7 @@ private:
   std::vector<int> topIds_;
   std::vector<int> seqIds_;
   std::vector<int> batchMachineIdVec_;
+  std::vector<int> batchMachineStartPos_;
   std::vector<std::vector<Path>> finalPaths_;
   std::vector<real> minFinalPathLogProb_;
   BeamSearchControlCallbacks* beamSearchCtrlCallbacks_;
paddle/parameter/Argument.cpp
@@ -276,17 +276,21 @@ int32_t Argument::resizeAndCopyFrom(const Argument& src,
 void Argument::concat(const std::vector<Argument>& args,
                       const std::vector<int>& selectRows,
                       const std::vector<int>& seqStartPos,
+                      const std::vector<int>& copySize,
                       bool useGpu,
                       hl_stream_t stream,
                       PassType passType) {
   CHECK(!subSequenceStartPositions)
       << "undefined behavior for subsequence positions";
-  size_t batchSize = selectRows.size();
+
+  size_t batchSize = 0;
+  for (size_t i = 0; i < copySize.size(); ++i)
+    batchSize += copySize[i] * (seqStartPos[i + 1] - seqStartPos[i]);
+
   auto copyArg = [batchSize, stream](MatrixPtr& dst,
                                      MatrixPtr src,
-                                     int startRow,
-                                     int pos,
+                                     int desStartRow,
+                                     int srcStartRow,
                                      int size,
                                      bool useGpu) {
     if (!src) {
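The new batchSize is no longer simply selectRows.size(): each sequence i now contributes copySize[i] destination rows for every selected row in [seqStartPos[i], seqStartPos[i + 1]). A small self-contained check of that arithmetic, with made-up positions:

#include <cstdio>
#include <vector>

int main() {
  // Made-up inputs: two sequences covering rows [0, 2) and [2, 5) of
  // selectRows, with a per-sequence copy multiplier.
  std::vector<int> seqStartPos = {0, 2, 5};
  std::vector<int> copySize = {3, 1};

  // The new rule from the hunk above: each selected row of sequence i
  // expands into copySize[i] destination rows.
  size_t batchSize = 0;
  for (size_t i = 0; i < copySize.size(); ++i)
    batchSize += copySize[i] * (seqStartPos[i + 1] - seqStartPos[i]);

  // 3*2 + 1*3 = 9 rows; the old rule gave selectRows.size() == 5.
  std::printf("batchSize = %zu\n", batchSize);
  return 0;
}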
@@ -300,8 +304,8 @@ void Argument::concat(const std::vector<Argument>& args,
       dst->resize(batchSize, width);
     }
-    MatrixPtr tmpMatrix = dst->subMatrix(startRow, size);
-    tmpMatrix->copyFrom(*src->subMatrix(pos, size), stream);
+    MatrixPtr tmpMatrix = dst->subMatrix(desStartRow, size);
+    tmpMatrix->copyFrom(*src->subMatrix(srcStartRow, size), stream);
   };
 
   auto copyIds = [batchSize, stream](IVectorPtr& dst,
@@ -339,24 +343,24 @@ void Argument::concat(const std::vector<Argument>& args,
   dataId = args[0].dataId;
   CHECK_NE(seqStartPos.size(), 0UL);
-  size_t sampleNum = seqStartPos.size() - 1;
-  for (size_t i = 0; i < sampleNum; ++i) {
+  int desStartRow = 0;
+  for (size_t i = 0; i < copySize.size(); ++i) {
     int startPos = seqStartPos[i];
     int endPos = seqStartPos[i + 1];
     CHECK_GE(args.size(), static_cast<size_t>(endPos - startPos));
     for (int j = startPos; j < endPos; ++j) {
       const Argument& arg = args[j - startPos];
-      CHECK_EQ(arg.dataId, dataId) << "Arguments in concat should have"
-                                   << " same dataId";
-      const int copySize = 1;
-      const int rowIdx = selectRows[j];
-      copyArg(in, arg.in, j, rowIdx, copySize, useGpu);
-      copyArg(value, arg.value, j, rowIdx, copySize, useGpu);
+      CHECK_EQ(arg.dataId, dataId) << "Arguments in concat should have the "
+                                   << "same dataId";
+      const int srcStartRow = selectRows[j];
+      copyArg(in, arg.in, desStartRow, srcStartRow, copySize[i], useGpu);
+      copyArg(value, arg.value, desStartRow, srcStartRow, copySize[i], useGpu);
       if (passType != PASS_TEST) {
-        copyArg(grad, arg.grad, j, rowIdx, copySize, useGpu);
+        copyArg(grad, arg.grad, desStartRow, srcStartRow, copySize[i], useGpu);
       }
-      copyIds(ids, arg.ids, j, rowIdx, copySize, useGpu);
-      copyStrs(strs, arg.strs, j, rowIdx, copySize, useGpu);
+      copyIds(ids, arg.ids, desStartRow, srcStartRow, copySize[i], useGpu);
+      copyStrs(strs, arg.strs, desStartRow, srcStartRow, copySize[i], useGpu);
+      desStartRow += copySize[i];
     }
   }
   ICpuGpuVector::resizeOrCreate(
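The rewritten copy loop advances a destination cursor by copySize[i] per step instead of reusing the loop index j as the destination row. A toy trace of that bookkeeping (copyBlock is a hypothetical stand-in for the copyArg/copyIds lambdas):

#include <cstdio>
#include <vector>

// Hypothetical stand-in for the copyArg/copyIds lambdas: just report
// which source block lands at which destination rows.
void copyBlock(int desStartRow, int srcStartRow, int size) {
  std::printf("dst rows [%d..%d) <- src rows [%d..%d)\n",
              desStartRow, desStartRow + size,
              srcStartRow, srcStartRow + size);
}

int main() {
  // Invented data: one sequence with two steps; each step selects a
  // source row and is expanded into copySize[0] = 3 destination rows.
  std::vector<int> seqStartPos = {0, 2};
  std::vector<int> selectRows = {5, 9};
  std::vector<int> copySize = {3};

  // The rewritten loop: the destination cursor advances by copySize[i],
  // instead of the old one-row-per-j copy.
  int desStartRow = 0;
  for (size_t i = 0; i < copySize.size(); ++i) {
    for (int j = seqStartPos[i]; j < seqStartPos[i + 1]; ++j) {
      copyBlock(desStartRow, selectRows[j], copySize[i]);
      desStartRow += copySize[i];
    }
  }
  return 0;
}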
paddle/parameter/Argument.h
@@ -240,6 +240,7 @@ struct Argument {
   void concat(const std::vector<Argument>& args,
               const std::vector<int>& selectRows,
               const std::vector<int>& seqStartPos,
+              const std::vector<int>& copySize,
               bool useGpu,
               hl_stream_t stream,
               PassType passType);
python/paddle/trainer_config_helpers/networks.py
@@ -1370,14 +1370,7 @@ def simple_attention(encoded_sequence,
                                         param_attr=softmax_param_attr,
                                         name="%s_softmax" % name,
                                         bias_attr=False)
-
-    scaled = scaling_layer(weight=attention_weight,
-                           input=encoded_sequence,
-                           name='%s_scaling' % name)
-
-    return pooling_layer(input=scaled,
-                         pooling_type=SumPooling(),
-                         name="%s_pooling" % name)
+    return attention_weight
 
 
 def inputs(layers, *args):
@@ -1395,7 +1388,7 @@ def inputs(layers, *args):
     if len(args) != 0:
         layers.extend(args)
 
     Inputs(*[l.name for l in layers])
 
 
 def outputs(layers, *args):
@@ -1438,7 +1431,7 @@ def outputs(layers, *args):
     assert len(layers) > 0
 
     if HasInputsSet():  # input already set
         Outputs(*[l.name for l in layers])
         return  # just return outputs.
 
     if len(layers) != 1: