diff --git a/paddle/function/RowConvOp.cpp b/paddle/function/RowConvOp.cpp
index c3abb64971fd665324409c3adca44a7a5f827fd1..b6501e8f4db7fd33891cd80e07a6f36dd0b34532 100644
--- a/paddle/function/RowConvOp.cpp
+++ b/paddle/function/RowConvOp.cpp
@@ -136,6 +136,7 @@ public:
     // check
     CHECK_EQ(2UL, inputs.size());
     CHECK_EQ(1UL, outputs.size());
+    // TODO(qingqing): support ASSIGN_TO.
     CHECK_EQ(outputs[0].getArgType(), ADD_TO);
     CHECK(inputs[0].isSequenceArg() && outputs[0].isSequenceArg())
         << "SequenceArg required here.";
@@ -144,9 +145,7 @@ public:
     auto w = inputs[1];
     CHECK(in.data() && out.data() && in.getSequenceId().data());
     CHECK_EQ(in.shape().ndims(), 2UL);
-    CHECK_EQ(out.shape().ndims(), 2UL);
-    CHECK_EQ(in.shape()[1], out.shape()[1]);
-    CHECK_EQ(in.shape()[0], out.shape()[0]);
+    CHECK(in.shape() == out.shape());
     CHECK_EQ(w.shape()[1], in.shape()[1]);
 
     auto outMat = out.matrix<Device>();
@@ -176,6 +175,7 @@ public:
 
 template <DeviceType Device>
 class RowConvGradFunc : public FunctionBase {
+  // TODO(qingqing): split into RowConvDataFunc and RowConvWeightFunc
 public:
   void init(const FuncConfig& config) override {}
 
@@ -196,9 +196,8 @@ public:
     auto wGrad = outputs[1];
 
     CHECK_EQ(in.shape().ndims(), 2UL);
-    CHECK_EQ(outGrad.shape().ndims(), 2UL);
-    CHECK_EQ(in.shape()[1], outGrad.shape()[1]);
-    CHECK_EQ(in.shape()[0], outGrad.shape()[0]);
+    CHECK(in.shape() == inGrad.shape());
+    CHECK(in.shape() == outGrad.shape());
     CHECK_EQ(wGrad.shape()[1], in.shape()[1]);
 
     const auto outGMat = outGrad.matrix<Device>();
diff --git a/paddle/gserver/layers/RowConvLayer.cpp b/paddle/gserver/layers/RowConvLayer.cpp
index 5302e0e1a8f5489b65ae93161b26551237950d99..54d77999ad5b30a8d9f4feaa02d81417957544a7
--- a/paddle/gserver/layers/RowConvLayer.cpp
+++ b/paddle/gserver/layers/RowConvLayer.cpp
@@ -43,13 +43,14 @@ void RowConvLayer::forward(PassType passType) {
   resetOutput(height, width);
 
   const auto startPos = getInput(0).sequenceStartPositions->getVector(useGpu_);
-  wDims_ = TensorShape({contexLength_, width});
+  MatrixPtr w = weight_->getW();
+  wDims_ = TensorShape({w->getHeight(), w->getWidth()});
 
   MatrixPtr outV = getOutputValue();
   BufferArgs inputs;
   BufferArgs outputs;
   inputs.addArg(*getInputValue(0), *startPos);
-  inputs.addArg(*weight_->getW(), wDims_);
+  inputs.addArg(*w, wDims_);
   outputs.addArg(*getOutputValue(), *startPos, ADD_TO);
 
   {
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index 1fd62cda508bc53b09067e70671cfa733f0818df..1c3d776a4f36870ba1fb1128da863dffc29debd9 100755
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -191,6 +191,14 @@ class LayerType(object):
     PAD_LAYER = "pad"
     MULTIPLEX_LAYER = "multiplex"
     ROW_CONV_LAYER = "row_conv"
+
+    PRINT_LAYER = 'print'
+    PRIORBOX_LAYER = 'priorbox'
+
+    CTC_LAYER = 'ctc'
+    WARP_CTC_LAYER = 'warp_ctc'
+    CRF_LAYER = 'crf'
+    CRF_DECODING_LAYER = 'crf_decoding'
     NCE_LAYER = 'nce'
     RANK_COST = 'rank-cost'
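For context on what the checks in RowConvOp.cpp guard: row convolution (the lookahead convolution from the DeepSpeech2 work) slides a `[context_length, width]` weight over each sequence, so each output row accumulates the next `context_length` input rows weighted column-wise, clipped at the sequence end. The sketch below is a simplified CPU reference of that forward pass, not the Paddle kernel; the names `refRowConv` and `seqStarts` are illustrative only, and it follows the ADD_TO convention checked in the diff (results are accumulated into a pre-initialized output).

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Simplified reference for the row (lookahead) convolution.
// All matrices are row-major [rows x width] flat vectors.
// Results are added onto `out` (ADD_TO), which the caller pre-initializes.
void refRowConv(const std::vector<float>& in,       // [totalRows x width]
                const std::vector<float>& w,        // [contextLength x width]
                const std::vector<int>& seqStarts,  // offsets, last == totalRows
                size_t width,
                size_t contextLength,
                std::vector<float>& out) {          // [totalRows x width]
  for (size_t s = 0; s + 1 < seqStarts.size(); ++s) {
    size_t begin = seqStarts[s];
    size_t end = seqStarts[s + 1];
    for (size_t i = begin; i < end; ++i) {
      // Clip the lookahead window at the end of the current sequence.
      size_t steps = std::min(contextLength, end - i);
      for (size_t j = 0; j < steps; ++j) {
        for (size_t d = 0; d < width; ++d) {
          out[i * width + d] += in[(i + j) * width + d] * w[j * width + d];
        }
      }
    }
  }
}
```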
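The consolidated `CHECK(in.shape() == out.shape())` also assumes the shape type provides an equality operator that compares both rank and every extent; otherwise the three removed per-dimension `CHECK_EQ` calls would not be fully covered. A minimal stand-in showing what such an operator needs to do (illustrative only, `ShapeSketch` is not Paddle's actual `TensorShape`):

```cpp
#include <cstddef>
#include <vector>

// Illustration of the equality semantics the single CHECK relies on:
// same number of dimensions and the same extent in every dimension.
struct ShapeSketch {
  std::vector<size_t> dims;

  size_t ndims() const { return dims.size(); }
  size_t operator[](size_t i) const { return dims[i]; }

  bool operator==(const ShapeSketch& other) const {
    return dims == other.dims;  // rank and all extents match
  }
  bool operator!=(const ShapeSketch& other) const { return !(*this == other); }
};

// Example: {10, 256} == {10, 256} passes, while a rank or extent
// mismatch fails, which is exactly what the removed CHECK_EQs enforced.
```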