From d5114c60b098a3c5f778d48b70d0683b093b49db Mon Sep 17 00:00:00 2001
From: Jacek Czaja
Date: Tue, 25 Sep 2018 11:00:30 +0200
Subject: [PATCH] - Reviewers' suggestions to fused_embedding_fc_lstm_op

---
 .../fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc | 11 ++++++-----
 paddle/fluid/operators/fused_embedding_fc_lstm_op.cc  |  4 ----
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
index 38495125c34..af3f23cbf91 100644
--- a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include "paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.h"
+#include <algorithm>
 #include <string>
 #include "paddle/fluid/framework/lod_tensor.h"
 
@@ -98,17 +99,17 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
 
     // Copy only gate biases values (only actual bias data, not peephole
     // weights)
-    std::vector<float> combined_biases(n, 0.0f);
-    memcpy(&combined_biases[0], lstm_bias_tensor.data<float>(),
-           n * sizeof(float));
+    std::vector<float> combined_biases;
+    combined_biases.reserve(n);
+    std::copy_n(lstm_bias_tensor.data<float>(), n,
+                std::back_inserter(combined_biases));
 
     if (with_fc_bias) {
       // Add FC-bias with LSTM-bias (into GEMM result to be)
       auto* fc_bias_var = scope->FindVar(fc_bias->Name());
       const auto& fc_bias_tensor = fc_bias_var->Get<framework::LoDTensor>();
       for (int i = 0; i < fc_bias_tensor.numel(); i++) {
-        combined_biases[i] =
-            lstm_bias_tensor.data<float>()[i] + fc_bias_tensor.data<float>()[i];
+        combined_biases[i] += fc_bias_tensor.data<float>()[i];
       }
     }
 
diff --git a/paddle/fluid/operators/fused_embedding_fc_lstm_op.cc b/paddle/fluid/operators/fused_embedding_fc_lstm_op.cc
index 3c4cc774525..0b917a40362 100644
--- a/paddle/fluid/operators/fused_embedding_fc_lstm_op.cc
+++ b/paddle/fluid/operators/fused_embedding_fc_lstm_op.cc
@@ -63,10 +63,6 @@ void FusedEmbeddingFCLSTMOp::InferShape(
   auto embeddings_dims = ctx->GetInputDim("Embeddings");
   PADDLE_ENFORCE_EQ(embeddings_dims.size(), 2,
                     "The rank of Input(Embeddings) should be 2.");
-  // PADDLE_ENFORCE_EQ(wx_dims[0], x_dims[1],
-  //                   "The first dimension of Input(Embeddings) "
-  //                   "should be %d.",
-  //                   x_dims[1]);
 
   auto wh_dims = ctx->GetInputDim("WeightH");
   int frame_size = wh_dims[1] / 4;
--
GitLab
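
Note: the first hunk replaces zero-initialization plus memcpy with reserve()
followed by std::copy_n into a std::back_inserter, and then folds the FC bias
into the copied gate biases in place, so the LSTM bias is no longer re-read
inside the loop. Below is a minimal standalone sketch of that pattern, with
plain std::vector<float> standing in for the LoDTensor buffers; CombineBiases
and its parameters are illustrative names, not part of the fuse-pass API.

  #include <algorithm>
  #include <cassert>
  #include <cstddef>
  #include <iterator>
  #include <vector>

  // Copy the first n values of lstm_bias (the gate biases; peephole weights
  // past n are deliberately skipped), then optionally add the FC bias so a
  // single vector can later be added to the GEMM result.
  std::vector<float> CombineBiases(const std::vector<float>& lstm_bias,
                                   const std::vector<float>& fc_bias,
                                   std::size_t n, bool with_fc_bias) {
    std::vector<float> combined_biases;
    combined_biases.reserve(n);  // one allocation, no zero-fill before copy
    std::copy_n(lstm_bias.begin(), n, std::back_inserter(combined_biases));

    if (with_fc_bias) {
      assert(fc_bias.size() <= n);  // FC bias must not outrun gate biases
      for (std::size_t i = 0; i < fc_bias.size(); ++i) {
        combined_biases[i] += fc_bias[i];
      }
    }
    return combined_biases;
  }

The old form (vector(n, 0.0f) followed by memcpy) zero-filled the buffer and
then immediately overwrote it; reserve() plus back_inserter writes each
element exactly once, and the += form expresses the accumulation directly.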