Commit 4b39f92b authored by caoying03

add implementation of SubNestedSequenceLayer.
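
SubNestedSequenceLayer takes two inputs: a nested sequence, and one score for
every inner sequence of that nested sequence. For each outer sequence, the
layer selects the top_k highest-scoring inner sequences and concatenates their
rows into the output, which is again a nested sequence.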

Parent c0ecd5c4
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "Layer.h"
#include "paddle/math/Matrix.h"
#include "paddle/math/Vector.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {
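/**
 * A layer that selects a subset of inner sequences from a nested sequence.
 *
 * The first input is a nested sequence; the second input stores one score
 * for every inner sequence of the first input. For each outer sequence,
 * the top_k highest-scoring inner sequences are kept and their rows are
 * concatenated to form the output, which is again a nested sequence.
 */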
class SubNestedSequenceLayer : public Layer {
public:
explicit SubNestedSequenceLayer(const LayerConfig& config) : Layer(config) {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
private:
void checkInputs(const Argument& inputSeq, const Argument& seqScores);
void calSelectedCols(const Argument& scores,
const int* subSeqStartPos,
size_t topK);
void partialSortIndex(const std::vector<real>& values,
int k,
std::vector<size_t>& indices);
void buildOutputSeqInfo();
std::vector<int> outSeqStartInfo_;
std::vector<int> outSubSeqStartInfo_;
MatrixPtr scoreOverInputSeq_;
// On CPU, rowIndice_ wraps the memory of selectedRows_; on GPU it holds a
// device-side copy of it.
IVectorPtr rowIndice_;
std::vector<int> selectedRows_;
};
REGISTER_LAYER(sub_nested_seq, SubNestedSequenceLayer);
bool SubNestedSequenceLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
/* Initialize the basic parent class */
Layer::init(layerMap, parameterMap);
CHECK_EQ(2U, inputLayers_.size());
setNeedSequenceInfo(false);
return true;
}
void SubNestedSequenceLayer::checkInputs(const Argument& inputSeq,
const Argument& seqScores) {
CHECK(inputSeq.hasSubseq()) << "The first input of SubNestedSequenceLayer "
<< "must be a nested sequence.";
CHECK(seqScores.hasSeq())
<< "The second input of SubNestedSequenceLayer must be a sequence.";
CHECK_EQ(seqScores.value->getWidth(), 1U)
<< "The second input of SubNestedSequenceLayer stores one score "
<< "for each sequence in a nested sequence, "
<< "so its width should be 1.";
CHECK_EQ(inputSeq.getNumSubSequences(), seqScores.value->getHeight())
<< "The second input of SubNestedSequenceLayer stores one score "
<< "for each sequence in a nested sequence, so its height should "
<< "equal the number of sequences in the first input.";
}
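// Fill `indices` with an index permutation of `values` whose first k entries
// point at the k largest values in descending order; for example,
// partialSortIndex({0.2, 0.9, 0.5}, 2, idx) yields idx beginning {1, 2}.
// The order of the remaining indices is unspecified.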
void SubNestedSequenceLayer::partialSortIndex(const std::vector<real>& values,
int k,
std::vector<size_t>& indices) {
CHECK_GE(values.size(), static_cast<size_t>(k));
indices.resize(values.size(), 0);
std::iota(begin(indices), end(indices), 0U);
std::partial_sort(begin(indices),
begin(indices) + k,
end(indices),
[&](size_t a, size_t b) { return values[a] > values[b]; });
}
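// Collect, for every outer sequence, the rows of the top_k highest-scoring
// inner sequences into selectedRows_, building the output sequence and
// sub-sequence boundaries along the way.
//
// A small worked example with top_k = 2: two outer sequences with inner
// boundaries subSeqStartPos = {0, 2, 5, 6, 8, 10}, score boundaries
// scoreSeqStartPos = {0, 3, 5}, and scores {0.1, 0.9, 0.5, 0.3, 0.7}.
// The first outer sequence keeps inner sequences 1 and 2 (rows 2-4 and 5),
// the second keeps inner sequences 4 and 3 (rows 8-9 and 6-7), giving
// selectedRows_ = {2, 3, 4, 5, 8, 9, 6, 7},
// outSubSeqStartInfo_ = {0, 3, 4, 6, 8} and outSeqStartInfo_ = {0, 4, 8}.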
void SubNestedSequenceLayer::calSelectedCols(const Argument& scores,
const int* subSeqStartPos,
size_t topK) {
selectedRows_.clear();
outSubSeqStartInfo_.resize(1, 0);
outSeqStartInfo_.resize(1, 0);
real* seqScores = nullptr;
if (useGpu_) {
Matrix::resizeOrCreate(scoreOverInputSeq_,
scores.value->getHeight(),
scores.value->getWidth(),
false /* trans */,
false /* useGpu */);
scoreOverInputSeq_->copyFrom(*scores.value);
seqScores = scoreOverInputSeq_->getData();
} else {
seqScores = scores.value->getData();
}
int* scoreSeqStartPos = scores.sequenceStartPositions->getMutableData(false);
for (int i = 0; i < scores.getNumSequences(); ++i) {
int seqLen = scoreSeqStartPos[i + 1] - scoreSeqStartPos[i];
int selectedSeqNum = std::min(static_cast<int>(topK), seqLen);
std::vector<size_t> sortedIdx;
partialSortIndex(std::vector<real>(seqScores + scoreSeqStartPos[i],
seqScores + scoreSeqStartPos[i + 1]),
selectedSeqNum,
sortedIdx);
for (int j = 0; j < selectedSeqNum; ++j) {
int begPos = subSeqStartPos[scoreSeqStartPos[i] + sortedIdx[j]];
int endPos = subSeqStartPos[scoreSeqStartPos[i] + sortedIdx[j] + 1];
for (int m = begPos; m < endPos; ++m) selectedRows_.push_back(m);
outSubSeqStartInfo_.push_back(outSubSeqStartInfo_.back() + endPos -
begPos);
}
outSeqStartInfo_.push_back(outSubSeqStartInfo_.back());
}
}
void SubNestedSequenceLayer::buildOutputSeqInfo() {
Argument& output = getOutput();
ICpuGpuVector::resizeOrCreate(
output.sequenceStartPositions, outSeqStartInfo_.size(), false);
output.sequenceStartPositions->copyFrom(
outSeqStartInfo_.data(), outSeqStartInfo_.size(), false);
ICpuGpuVector::resizeOrCreate(
output.subSequenceStartPositions, outSubSeqStartInfo_.size(), false);
output.subSequenceStartPositions->copyFrom(
outSubSeqStartInfo_.data(), outSubSeqStartInfo_.size(), false);
}
void SubNestedSequenceLayer::forward(PassType passType) {
Layer::forward(passType);
const Argument& inputSeq = getInput(0);
const Argument& seqScores = getInput(1);
checkInputs(inputSeq, seqScores);
calSelectedCols(seqScores,
inputSeq.subSequenceStartPositions->getMutableData(false),
config_.top_k());
resetOutput(selectedRows_.size(), getSize());
buildOutputSeqInfo();
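// selectedRows_ lives in host memory; a GPU run needs a device-side copy,
// while on CPU rowIndice_ can wrap selectedRows_'s buffer without copying.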
if (useGpu_) {
rowIndice_ = IVector::create(selectedRows_.size(), useGpu_);
rowIndice_->copyFrom(selectedRows_.data(), selectedRows_.size());
} else {
rowIndice_ =
IVector::create(selectedRows_.data(), selectedRows_.size(), useGpu_);
}
getOutputValue()->selectRows(*getInputValue(0), *rowIndice_);
}
void SubNestedSequenceLayer::backward(const UpdateCallback& callback) {
MatrixPtr inputGrad1 = getInputGrad(0);
MatrixPtr outputGrad = getOutputGrad();
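// Scatter the gradient back: row i of outputGrad is accumulated into row
// rowIndice_[i] of the first input's gradient.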
if (inputGrad1) outputGrad->addToRows(*inputGrad1, *rowIndice_);
}
} // namespace paddle
@@ -400,7 +400,6 @@ void initDataLayer(TestConfig testConf,
const std::vector<int>& labelSeqStartPositions =
testConf.inputDefs[i].labelSeqStartPositions;
if (labelSeqStartPositions.size() != 0) {
CHECK(!sequenceStartPositions);
CHECK_GE(static_cast<int>(labelSeqStartPositions.size()), 2);
sequenceStartPositions =
@@ -410,6 +409,19 @@ void initDataLayer(TestConfig testConf,
useGpu);
data.sequenceStartPositions = sequenceStartPositions;
}
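// Mirror the sequence-boundary handling above for sub-sequence boundaries,
// so tests can feed self-defined nested-sequence inputs.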
const std::vector<int>& labelSubSeqStartPositions =
testConf.inputDefs[i].labelSubSeqStartPositions;
if (labelSubSeqStartPositions.size() != 0) {
CHECK_GE(static_cast<int>(labelSubSeqStartPositions.size()), 2);
subSequenceStartPositions =
ICpuGpuVector::create(labelSubSeqStartPositions.size(), useGpu);
subSequenceStartPositions->copyFrom(labelSubSeqStartPositions.data(),
labelSubSeqStartPositions.size(),
useGpu);
data.subSequenceStartPositions = subSequenceStartPositions;
}
break;
}
default:
......
@@ -67,6 +67,7 @@ struct InputDef {
bool isStatic;
std::vector<int> labelInitValue;
std::vector<int> labelSeqStartPositions;
std::vector<int> labelSubSeqStartPositions;
MatrixPtr selfDefinedData;
InputDef(InputType type, string nameIn, size_t dimIn, size_t sizeIn) {
@@ -81,8 +82,10 @@ struct InputDef {
InputDef(InputType type,
string nameIn,
MatrixPtr selfDefinedData,
std::vector<int> selfDefinedSeqStartPos = {})
std::vector<int> selfDefinedSeqStartPos = {},
std::vector<int> selfDefinedSubSeqStartPos = {})
: labelSeqStartPositions(selfDefinedSeqStartPos),
labelSubSeqStartPositions(selfDefinedSubSeqStartPos),
selfDefinedData(selfDefinedData) {
inputType = type;
name = nameIn;
......
@@ -920,14 +920,15 @@ TEST(Layer, SequenceLastInstanceLayer) {
}
TEST(Layer, AverageLayer) {
testDegradeLayer(false, "average", "non-seq", -1); // seq average to non-seq
testDegradeLayer(false,
"average",
"non-seq",
5); // seq average to a shortened seq, stride window = 5
testDegradeLayer(
true, "average", "non-seq", -1); // hasSubseq average to non-seq
testDegradeLayer(true, "average", "seq", -1); // hasSubseq average to seq
}
TEST(Layer, SequenceConcatLayer) {
@@ -1879,6 +1880,68 @@ TEST(Layer, CropLayer) {
}
}
TEST(Layer, SubNestedSequenceLayer) {
const int layerSize = 128;
TestConfig config;
config.layerConfig.set_type("sub_nested_seq");
config.layerConfig.set_top_k(2);
config.layerConfig.set_name("sub_nested_seq_layer");
config.layerConfig.set_size(layerSize);
// Generate the first input
srand((size_t)(time(NULL)));
const int batchSize = 128;
const int maxSeqLen = 100;
const int maxSubSeqNum = 50;
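// Randomly generate nested-sequence boundaries: each of the batchSize outer
// sequences holds up to maxSubSeqNum inner sequences, each up to maxSeqLen
// words long.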
// sequenceStartPositions info for the first input.
vector<int> seqStartPos1(batchSize + 1, 0);
// subSequenceStartPositions info for the first input.
vector<int> subSeqStartPos;
subSeqStartPos.push_back(0);
// sequenceStartPositions info for the second input.
vector<int> seqStartPos2(batchSize + 1, 0);
size_t curPos = 0;
for (int i = 1; i < batchSize + 1; ++i) {
int seqNum = uniformRandom(maxSubSeqNum);
seqStartPos2[i] = seqStartPos2[i - 1] + seqNum;
for (int j = 0; j < seqNum; ++j) {
int seqLen = uniformRandom(maxSeqLen);
subSeqStartPos.push_back(curPos + seqLen);
curPos += seqLen;
}
seqStartPos1[i] = curPos;
}
MatrixPtr dataInputPtr1 = Matrix::create(curPos, layerSize, false, false);
dataInputPtr1->randomizeUniform();
config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
"layer_0",
dataInputPtr1,
seqStartPos1,
subSeqStartPos});
config.layerConfig.add_inputs();
// Generate the second input
MatrixPtr dataInputPtr2 =
Matrix::create(seqStartPos2[batchSize], 1, false, false);
dataInputPtr2->randomizeUniform();
config.inputDefs.push_back(
{INPUT_SELF_DEFINE_DATA, "layer_1", dataInputPtr2, seqStartPos2});
config.layerConfig.add_inputs();
for (auto useGpu : {false, true}) {
testLayerGrad(config,
"sub_nested_seq",
/* batchSize */ 100,
/* trans */ false,
/* useGpu*/ useGpu,
/* useWeight */ false);
}
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
initMain(argc, argv);
......
@@ -31,33 +31,104 @@ except ImportError:
import copy
__all__ = [
'full_matrix_projection', 'AggregateLevel', 'ExpandLevel',
'identity_projection', 'dotmul_projection', 'dotmul_operator',
'repeat_layer', 'seq_reshape_layer', 'table_projection', 'mixed_layer',
'data_layer', 'embedding_layer', 'fc_layer', 'grumemory', 'pooling_layer',
'lstmemory', 'last_seq', 'first_seq', 'cos_sim', 'hsigmoid',
'conv_projection', 'mse_cost', 'regression_cost', 'classification_cost',
'LayerOutput', 'img_conv_layer', 'img_pool_layer', 'batch_norm_layer',
'img_cmrnorm_layer', 'addto_layer', 'concat_layer', 'seq_concat_layer',
'lstm_step_layer', 'recurrent_group', 'memory', 'StaticInput',
'expand_layer', 'scaling_layer', 'scaling_projection', 'power_layer',
'interpolation_layer', 'bilinear_interp_layer', 'trans_layer',
'rotate_layer', 'sum_to_one_norm_layer', 'get_output_layer', 'LayerType',
'context_projection', 'beam_search', 'maxid_layer', 'GeneratedInput',
'SubsequenceInput', 'gru_step_layer', 'gru_step_naive_layer',
'recurrent_layer', 'BaseGeneratedInput', 'conv_operator',
'conv_shift_layer', 'tensor_layer', 'selective_fc_layer',
'sampling_id_layer', 'slope_intercept_layer',
'trans_full_matrix_projection', 'linear_comb_layer', 'convex_comb_layer',
'ctc_layer', 'warp_ctc_layer', 'crf_layer', 'crf_decoding_layer',
'nce_layer', 'cross_entropy_with_selfnorm', 'cross_entropy',
'multi_binary_label_cross_entropy', 'sum_cost', 'rank_cost', 'lambda_cost',
'huber_cost', 'block_expand_layer', 'maxout_layer', 'out_prod_layer',
'printer_layer', 'print_layer', 'priorbox_layer',
'cross_channel_norm_layer', 'multibox_loss_layer', 'detection_output_layer',
'spp_layer', 'pad_layer', 'eos_layer', 'smooth_l1_cost', 'layer_support',
'multiplex_layer', 'row_conv_layer', 'dropout_layer', 'prelu_layer',
'gated_unit_layer', 'crop_layer', 'sub_nested_seq_layer'
'full_matrix_projection',
'AggregateLevel',
'ExpandLevel',
'identity_projection',
'dotmul_projection',
'dotmul_operator',
'repeat_layer',
'seq_reshape_layer',
'table_projection',
'mixed_layer',
'data_layer',
'embedding_layer',
'fc_layer',
'grumemory',
'pooling_layer',
'lstmemory',
'last_seq',
'first_seq',
'cos_sim',
'hsigmoid',
'conv_projection',
'mse_cost',
'regression_cost',
'classification_cost',
'LayerOutput',
'img_conv_layer',
'img_pool_layer',
'batch_norm_layer',
'img_cmrnorm_layer',
'addto_layer',
'concat_layer',
'seq_concat_layer',
'lstm_step_layer',
'recurrent_group',
'memory',
'StaticInput',
'expand_layer',
'scaling_layer',
'scaling_projection',
'power_layer',
'interpolation_layer',
'bilinear_interp_layer',
'trans_layer',
'rotate_layer',
'sum_to_one_norm_layer',
'get_output_layer',
'LayerType',
'context_projection',
'beam_search',
'maxid_layer',
'GeneratedInput',
'SubsequenceInput',
'gru_step_layer',
'gru_step_naive_layer',
'recurrent_layer',
'BaseGeneratedInput',
'conv_operator',
'conv_shift_layer',
'tensor_layer',
'selective_fc_layer',
'sampling_id_layer',
'slope_intercept_layer',
'trans_full_matrix_projection',
'linear_comb_layer',
'convex_comb_layer',
'ctc_layer',
'warp_ctc_layer',
'crf_layer',
'crf_decoding_layer',
'nce_layer',
'cross_entropy_with_selfnorm',
'cross_entropy',
'multi_binary_label_cross_entropy',
'sum_cost',
'rank_cost',
'lambda_cost',
'huber_cost',
'block_expand_layer',
'maxout_layer',
'out_prod_layer',
'printer_layer',
'print_layer',
'priorbox_layer',
'cross_channel_norm_layer',
'multibox_loss_layer',
'detection_output_layer',
'spp_layer',
'pad_layer',
'eos_layer',
'smooth_l1_cost',
'layer_support',
'multiplex_layer',
'row_conv_layer',
'dropout_layer',
'prelu_layer',
'gated_unit_layer',
'crop_layer',
'sub_nested_seq_layer',
]
......