diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
index c82bbda57e1f727eeb95658880184065f17634ce..5cb37ea925afe60cb000499d390c9be0c398cb2e 100644
--- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
@@ -217,10 +217,10 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
   float* out_data = out->mutable_data<float>(platform::CPUPlace());
   std::array<const float*, 4> tensors(
       {{W_forget_w0.data<float>(), W_input_w0.data<float>(),
-         W_output_w0.data<float>(), W_cell_w0.data<float>()}});
+        W_output_w0.data<float>(), W_cell_w0.data<float>()}});
   std::array<const float*, 4> tensors1(
       {{W_forget_w1.data<float>(), W_input_w1.data<float>(),
-         W_output_w1.data<float>(), W_cell_w1.data<float>()}});
+        W_output_w1.data<float>(), W_cell_w1.data<float>()}});

   for (int row = 0; row < D; row++) {
     for (int col = 0; col < 4; col++) {
@@ -244,7 +244,7 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
                      LoDTensor* out) {
   std::array<const float*, 4> tensors(
       {{B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
-         B_cell.data<float>()}});
+        B_cell.data<float>()}});

   PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
   int D = B_forget.dims()[0];