From 56a722a1d01eb49bfbe5120065c615ecf1e16fe5 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Mon, 10 Jul 2017 14:22:18 +0800 Subject: [PATCH] output all beam search results in layer group. --- .../RecurrentGradientMachine.cpp | 104 ++++++++++++------ .../RecurrentGradientMachine.h | 7 +- paddle/parameter/Argument.cpp | 36 +++--- paddle/parameter/Argument.h | 1 + .../paddle/trainer_config_helpers/networks.py | 13 +-- 5 files changed, 102 insertions(+), 59 deletions(-) diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp index 41e0929959b..4cb5b8ec2db 100644 --- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp @@ -1012,11 +1012,6 @@ void RecurrentGradientMachine::generateSequence() { /* width */ resultNum, false, /* useGpu */ false); - Matrix::resizeOrCreate(generator_.outArg.value, - /* height */ maxGenWordCount, - /* width */ 1, - false, - /* useGpu */ false); } ICpuGpuVector::resizeOrCreate(generator_.outArg.sequenceStartPositions, numSequences + 1, @@ -1026,7 +1021,7 @@ void RecurrentGradientMachine::generateSequence() { } else { oneWaySearch(numSequences); } - if (dataArgsSize_) createDataOutlink(batchMachineIdVec_); + if (dataArgsSize_) createDataOutlink(); size_t size = generator_.ids.size(); generator_.outArg.ids->resize(size); @@ -1106,6 +1101,7 @@ void RecurrentGradientMachine::oneWaySearch(size_t batchSize) { } batchMachineIdVec_.clear(); + batchMachineStartPos_.clear(); int* starts = generator_.outArg.sequenceStartPositions->getMutableData(false); starts[0] = 0; generator_.ids.clear(); @@ -1312,13 +1308,20 @@ void RecurrentGradientMachine::fillGenOutputs() { finalPaths_[i].resize(minFinalPathsSize); } - batchMachineIdVec_.clear(); generator_.ids.clear(); int* starts = generator_.outArg.sequenceStartPositions->getMutableData(false); starts[0] = 0; if (numResults > 1) { - real* probs = generator_.outArg.in->getData(); + int idsProbSaveSize = 0; + for (auto inSeq : finalPaths_) { + for (auto path : inSeq) idsProbSaveSize += path.ids.size(); + idsProbSaveSize += inSeq.size(); + } + Matrix::resizeOrCreate( + generator_.outArg.value, idsProbSaveSize, 1, false, false); real* idsProb = generator_.outArg.value->getData(); + + real* probs = generator_.outArg.in->getData(); size_t curPos = 0; for (size_t i = 0; i < finalPaths_.size(); ++i) { for (size_t j = 0; j < finalPaths_[i].size(); ++j) { @@ -1333,24 +1336,16 @@ void RecurrentGradientMachine::fillGenOutputs() { curPos += genLen; idsProb[curPos++] = -1.0; probs[i * numResults + j] = path.logProb; - - if (!j && dataArgsSize_) { - // in beam search, here only reserved the top 1 generated result - // for out_links that are not the generated word indices. 
-          batchMachineIdVec_.insert(batchMachineIdVec_.end(),
-                                    path.machineIdVec.begin(),
-                                    path.machineIdVec.end());
-        }
       }
       starts[i + 1] = generator_.ids.size();
     }
   } else {
     for (size_t i = 0; i < finalPaths_.size(); ++i) {
       CHECK(!finalPaths_[i].empty());
-      generator_.ids.insert(generator_.ids.begin(),
-                            finalPaths_[i][0].ids.begin(),
-                            finalPaths_[i][0].ids.end());
-      starts[i + 1] = starts[i] + finalPaths_[i][0].ids.size();
+      Path& path = finalPaths_[i][0];
+      generator_.ids.insert(
+          generator_.ids.begin(), path.ids.begin(), path.ids.end());
+      starts[i + 1] = starts[i] + path.ids.size();
     }
   }
 }
@@ -1364,25 +1359,70 @@ void RecurrentGradientMachine::copyDataOutlinkFrame(size_t machineCur) {
   }
 }
 
-void RecurrentGradientMachine::createDataOutlink(
-    std::vector<int>& machineIdVec) {
-  size_t seqNum =
-      getBeamSize() > 1UL ? finalPaths_.size() : finalPaths_[0].size();
-  std::vector<int> starts(seqNum + 1, 0);
-  for (size_t i = 0; i < seqNum; ++i) {
-    size_t seqLen = getBeamSize() > 1UL ? finalPaths_[i][0].ids.size()
-                                        : finalPaths_[0][i].ids.size();
-    starts[i + 1] = starts[i] + seqLen;
+void RecurrentGradientMachine::createDataOutlinkSelRowsInfo(
+    bool isSeq, std::vector<Argument>& outArgs) {
+  batchMachineIdVec_.clear();
+
+  size_t seqIdx = 0;
+  for (size_t i = 0; i < finalPaths_.size(); ++i) {
+    for (size_t j = 0; j < finalPaths_[i].size(); ++j) {
+      std::vector<int>& machineIdVec = finalPaths_[i][j].machineIdVec;
+      if (isSeq) {
+        for (size_t i = 0; i < machineIdVec.size(); ++i) {
+          size_t rowId = machineIdVec[i];
+          int* seqPos =
+              outArgs[i].sequenceStartPositions->getMutableData(false);
+          batchMachineIdVec_.push_back(seqPos[rowId]);
+        }
+      } else {
+        batchMachineIdVec_.insert(
+            batchMachineIdVec_.end(), machineIdVec.begin(), machineIdVec.end());
+      }
+      seqIdx++;
+    }
+  }
+}
+
+void RecurrentGradientMachine::createDataOutlinkCopySizeInfo(
+    bool isSeq, std::vector<Argument>& outArgs, std::vector<int>& copySize) {
+  size_t totalSeqNum = std::accumulate(
+      finalPaths_.begin(),
+      finalPaths_.end(),
+      0UL,
+      [](size_t a, const std::vector<Path>& b) { return a + b.size(); });
+  copySize.resize(totalSeqNum, 1);
+
+  batchMachineStartPos_.resize(totalSeqNum + 1, 0);
+  if (isSeq) {
+    ICpuGpuVectorPtr inputSeqStartPos = outArgs[0].sequenceStartPositions;
+    CHECK_EQ(inputSeqStartPos->getSize() - 1, finalPaths_.size());
+    int* starts = inputSeqStartPos->getMutableData(false);
+    int seqId = 0;
+    for (int i = 0; i < finalPaths_.size(); ++i) {
+      for (int j = 0; j < finalPaths_[i].size(); ++j) {
+        copySize[seqId] = starts[i + 1] - starts[i];
+        batchMachineStartPos_[seqId + 1] =
+            batchMachineStartPos_[seqId] + finalPaths_[i][j].ids.size();
+        seqId++;
+      }
+    }
   }
+}
 
+void RecurrentGradientMachine::createDataOutlink() {
   for (size_t i = 0; i < dataArgsSize_; i++) {
+    bool isSeq = dataArgsFrame_[i][0].hasSeq();
+    std::vector<int> copySize;
+    createDataOutlinkCopySizeInfo(isSeq, dataArgsFrame_[i], copySize);
+    createDataOutlinkSelRowsInfo(isSeq, dataArgsFrame_[i]);
+
     dataArgs_[i].concat(dataArgsFrame_[i],
-                        machineIdVec,
-                        starts,
+                        batchMachineIdVec_,
+                        batchMachineStartPos_,
+                        copySize,
                         useGpu_,
                         HPPL_STREAM_1,
                         PASS_TEST);
-
     auto dataAgent = dynamic_cast<DataLayer*>(outFrameLines_[i + 1].agentLayer.get());
     CHECK_NOTNULL(dataAgent);
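
Note on the hunks above: createDataOutlink() no longer receives the top-1 machine ids collected in fillGenOutputs(); instead it derives, per data outlink, which machine rows to select (createDataOutlinkSelRowsInfo) and how many rows each beam result copies (createDataOutlinkCopySizeInfo), so every beam search result is emitted rather than only the best path. The sketch below is a rough Python transliteration of that bookkeeping for the sequence-input case; final_paths, seq_start_pos and the helper names are illustrative stand-ins, not part of the patch.

# final_paths[i][j]: the j-th beam result (a list of word ids) generated for
# the i-th input sequence; seq_start_pos: start offsets of the input
# sequences, as in outArgs[0].sequenceStartPositions.
def ids_prob_save_size(final_paths):
    # Mirrors idsProbSaveSize in fillGenOutputs: one slot per generated id
    # plus one -1.0 separator per beam result.
    n_ids = sum(len(path) for beams in final_paths for path in beams)
    n_paths = sum(len(beams) for beams in final_paths)
    return n_ids + n_paths

def copy_size_info(final_paths, seq_start_pos):
    # Mirrors createDataOutlinkCopySizeInfo for sequence outlinks: every beam
    # result of input sequence i copies that whole input sequence
    # (starts[i + 1] - starts[i] rows), while batch_machine_start_pos
    # accumulates the generated lengths path by path.
    copy_size = []
    batch_machine_start_pos = [0]
    for i, beams in enumerate(final_paths):
        seq_len = seq_start_pos[i + 1] - seq_start_pos[i]
        for path in beams:
            copy_size.append(seq_len)
            batch_machine_start_pos.append(
                batch_machine_start_pos[-1] + len(path))
    return copy_size, batch_machine_start_pos

For example, copy_size_info([[[2, 5, 9], [2, 7]]], [0, 4]) gives copy_size = [4, 4] and batch_machine_start_pos = [0, 3, 5]: two beam results for one 4-step input sequence, whose generated prefixes end at offsets 3 and 5.
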
diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.h b/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
index fb3fc5877ac..bd096770b75 100644
--- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
+++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
@@ -480,7 +480,11 @@ private:
    * @param machineIdVec : select a row of output matrix in each frame
    * that the generation process expanded.
    */
-  void createDataOutlink(std::vector<int>& machineIdVec);
+  void createDataOutlink();
+  void createDataOutlinkCopySizeInfo(bool isSeq,
+                                     std::vector<Argument>& outArgs,
+                                     std::vector<int>& copySize);
+  void createDataOutlinkSelRowsInfo(bool isSeq, std::vector<Argument>& outArgs);
 
   /*
    * @brief used in beam search, connect previous frame to form recurrent link
@@ -543,6 +547,7 @@ private:
   std::vector<int> topIds_;
   std::vector<int> seqIds_;
   std::vector<int> batchMachineIdVec_;
+  std::vector<int> batchMachineStartPos_;
   std::vector<std::vector<Path>> finalPaths_;
   std::vector<real> minFinalPathLogProb_;
   BeamSearchControlCallbacks* beamSearchCtrlCallbacks_;
diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp
index ef72b973c1a..e7522def08c 100644
--- a/paddle/parameter/Argument.cpp
+++ b/paddle/parameter/Argument.cpp
@@ -276,17 +276,21 @@ int32_t Argument::resizeAndCopyFrom(const Argument& src,
 void Argument::concat(const std::vector<Argument>& args,
                       const std::vector<int>& selectRows,
                       const std::vector<int>& seqStartPos,
+                      const std::vector<int>& copySize,
                       bool useGpu,
                       hl_stream_t stream,
                       PassType passType) {
   CHECK(!subSequenceStartPositions)
       << "undefined behavior for subsequence positions";
-  size_t batchSize = selectRows.size();
+
+  size_t batchSize = 0;
+  for (size_t i = 0; i < copySize.size(); ++i)
+    batchSize += copySize[i] * (seqStartPos[i + 1] - seqStartPos[i]);
+
   auto copyArg = [batchSize, stream](MatrixPtr& dst,
                                      MatrixPtr src,
-                                     int startRow,
-                                     int pos,
+                                     int desStartRow,
+                                     int srcStartRow,
                                      int size,
                                      bool useGpu) {
     if (!src) {
@@ -300,8 +304,8 @@ void Argument::concat(const std::vector<Argument>& args,
       dst->resize(batchSize, width);
     }
 
-    MatrixPtr tmpMatrix = dst->subMatrix(startRow, size);
-    tmpMatrix->copyFrom(*src->subMatrix(pos, size), stream);
+    MatrixPtr tmpMatrix = dst->subMatrix(desStartRow, size);
+    tmpMatrix->copyFrom(*src->subMatrix(srcStartRow, size), stream);
   };
 
   auto copyIds = [batchSize, stream](IVectorPtr& dst,
@@ -339,24 +343,24 @@ void Argument::concat(const std::vector<Argument>& args,
   dataId = args[0].dataId;
 
   CHECK_NE(seqStartPos.size(), 0UL);
-  size_t sampleNum = seqStartPos.size() - 1;
-  for (size_t i = 0; i < sampleNum; ++i) {
+  int desStartRow = 0;
+  for (size_t i = 0; i < copySize.size(); ++i) {
     int startPos = seqStartPos[i];
     int endPos = seqStartPos[i + 1];
     CHECK_GE(args.size(), static_cast<size_t>(endPos - startPos));
     for (int j = startPos; j < endPos; ++j) {
       const Argument& arg = args[j - startPos];
-      CHECK_EQ(arg.dataId, dataId) << "Arguments in concat should have"
-                                   << " same dataId";
-      const int copySize = 1;
-      const int rowIdx = selectRows[j];
-      copyArg(in, arg.in, j, rowIdx, copySize, useGpu);
-      copyArg(value, arg.value, j, rowIdx, copySize, useGpu);
+      CHECK_EQ(arg.dataId, dataId) << "Arguments in concat should have the "
+                                   << "same dataId";
+      const int srcStartRow = selectRows[j];
+      copyArg(in, arg.in, desStartRow, srcStartRow, copySize[i], useGpu);
+      copyArg(value, arg.value, desStartRow, srcStartRow, copySize[i], useGpu);
       if (passType != PASS_TEST) {
-        copyArg(grad, arg.grad, j, rowIdx, copySize, useGpu);
+        copyArg(grad, arg.grad, desStartRow, srcStartRow, copySize[i], useGpu);
       }
-      copyIds(ids, arg.ids, j, rowIdx, copySize, useGpu);
-      copyStrs(strs, arg.strs, j, rowIdx, copySize, useGpu);
+      copyIds(ids, arg.ids, desStartRow, srcStartRow, copySize[i], useGpu);
+      copyStrs(strs, arg.strs, desStartRow, srcStartRow, copySize[i], useGpu);
+      desStartRow += copySize[i];
     }
   }
   ICpuGpuVector::resizeOrCreate(
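
Note on the Argument::concat change above: with the new copySize argument, position j no longer copies a single row; it copies copySize[i] consecutive rows starting at selectRows[j], and the destination batch size becomes the sum of copySize[i] * (seqStartPos[i + 1] - seqStartPos[i]). A minimal NumPy model of the value-matrix path of this loop, with illustrative names only, is sketched below.

import numpy as np

def concat_values(args, select_rows, seq_start_pos, copy_size):
    # args[k]: value matrix produced by the k-th generation step;
    # select_rows[j]: source row chosen for position j; each selected row is
    # copied as a block of copy_size[i] consecutive rows, as in Argument::concat.
    batch_size = sum(c * (seq_start_pos[i + 1] - seq_start_pos[i])
                     for i, c in enumerate(copy_size))
    width = args[0].shape[1]
    dst = np.zeros((batch_size, width), dtype=args[0].dtype)

    des_start_row = 0
    for i, size in enumerate(copy_size):
        start, end = seq_start_pos[i], seq_start_pos[i + 1]
        for j in range(start, end):
            src = args[j - start]
            src_start_row = select_rows[j]
            dst[des_start_row:des_start_row + size] = \
                src[src_start_row:src_start_row + size]
            des_start_row += size
    return dst
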
diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h
index 0ccdef802e7..be871756584 100644
--- a/paddle/parameter/Argument.h
+++ b/paddle/parameter/Argument.h
@@ -240,6 +240,7 @@ struct Argument {
   void concat(const std::vector<Argument>& args,
               const std::vector<int>& selectRows,
               const std::vector<int>& seqStartPos,
+              const std::vector<int>& copySize,
               bool useGpu,
               hl_stream_t stream,
               PassType passType);
diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py
index b77932ce5f0..c0b2ced2340 100755
--- a/python/paddle/trainer_config_helpers/networks.py
+++ b/python/paddle/trainer_config_helpers/networks.py
@@ -1370,14 +1370,7 @@ def simple_attention(encoded_sequence,
                                 param_attr=softmax_param_attr,
                                 name="%s_softmax" % name,
                                 bias_attr=False)
-
-    scaled = scaling_layer(
-        weight=attention_weight,
-        input=encoded_sequence,
-        name='%s_scaling' % name)
-
-    return pooling_layer(
-        input=scaled, pooling_type=SumPooling(), name="%s_pooling" % name)
+    return attention_weight
 
 
 def inputs(layers, *args):
@@ -1395,7 +1388,7 @@ def inputs(layers, *args):
     if len(args) != 0:
         layers.extend(args)
 
-    Inputs(* [l.name for l in layers])
+    Inputs(*[l.name for l in layers])
 
 
 def outputs(layers, *args):
@@ -1438,7 +1431,7 @@ def outputs(layers, *args):
     assert len(layers) > 0
 
     if HasInputsSet():  # input already set
-        Outputs(* [l.name for l in layers])
+        Outputs(*[l.name for l in layers])
         return  # just return outputs.
 
     if len(layers) != 1:
-- GitLab
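
Note on the networks.py change above: simple_attention now returns the per-position attention weights instead of the sum-pooled context vector, so a caller that still needs the context vector has to apply the scaling and pooling step itself, exactly as the removed lines did. A hedged configuration sketch is shown below; the input layers (encoded_sequence, encoded_proj, decoder_prev_state) and all layer names are assumptions for illustration, not part of this patch.

# encoded_sequence, encoded_proj and decoder_prev_state are assumed to be
# layers defined earlier in the network configuration.
attention_weight = simple_attention(
    encoded_sequence=encoded_sequence,
    encoded_proj=encoded_proj,
    decoder_state=decoder_prev_state,
    name="attention")

# Re-create the context vector that simple_attention used to return, using
# the same scaling + sum-pooling combination the patch removed.
scaled = scaling_layer(
    weight=attention_weight, input=encoded_sequence, name="attention_scaling")
context = pooling_layer(
    input=scaled, pooling_type=SumPooling(), name="attention_pooling")
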