diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc
index c81945d47385bf283e87537656447c9d8e8a9892..ebf64a92746e85c278fb5a1d25803a3002373cd4 100644
--- a/paddle/fluid/operators/array_to_lod_tensor_op.cc
+++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc
@@ -157,7 +157,7 @@ class ArrayToLoDTensorOp : public framework::OperatorBase {
       return table_items[a].index < table_items[b].index;
     });
 
-    // Build LoDTensor `out`
+    // Build phi::DenseTensor `out`
     framework::LoD *out_lod = out->mutable_lod();
     out_lod->clear();
     auto prefix_lod = rank_table.coarse_lod();
@@ -215,16 +215,18 @@ class ArrayToLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X",
              "(std::vector<phi::DenseTensor>) A vector of tensors that is going to "
-             "be casted to a big LoDTensor.");
+             "be casted to a big phi::DenseTensor.");
     AddInput("RankTable",
              "(LoDRankTable) RankTable provides the coarse lod information to "
-             "build the output LoDTensor. See "
+             "build the output phi::DenseTensor. See "
              "'paddle/framework/lod_rank_table.h' for more details.");
-    AddOutput("Out", "(LoDTensor) The LoDTensor formed by input tensor array.");
+    AddOutput("Out",
+              "(phi::DenseTensor) The phi::DenseTensor formed by input tensor "
+              "array.");
     AddComment(
-        R"DOC(This Op build a big LoDTensor from a std::vector<LoDTensor>
+        R"DOC(This Op build a big phi::DenseTensor from a std::vector<phi::DenseTensor>
           and a LoDRankTable. It is supposed to be used in getting dynamic RNN's
-          outputs back to a normal LoDTensor. The std::vector<LoDTensor>
+          outputs back to a normal phi::DenseTensor. The std::vector<phi::DenseTensor>
           would be the output of RNN Op and the LoDRankTable would be build
           with RNN's input.)DOC");
   }
@@ -247,9 +249,9 @@ class ArrayToLoDTensorInferShape : public framework::InferShapeBase {
     // detail kernel implementation.
     context->SetOutputDim("Out", context->GetInputDim("X"));
 
-    // The output LoDTensor's lod_level should be input X's lod_level + 1.
-    // For compile-time, we call SetLoDLevel to set output's lod_level.
-    // For runtime, output LoDTensor's lod is determined by input X's lod and
+    // The output phi::DenseTensor's lod_level should be input X's lod_level
+    // + 1. For compile-time, we call SetLoDLevel to set output's lod_level. For
+    // runtime, output phi::DenseTensor's lod is determined by input X's lod and
     // the level specified by input RandTable.
     // We cannot get X's detail lod and RankTable's level in this function, so
    // leave this work to the detail kernel implementation.
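Every file below follows the same mechanical rename, and the removed `using LoDTensor = phi::DenseTensor;` declarations show why it is safe: `LoDTensor` was only a type alias. A minimal standalone sketch (assuming the alias really is a plain `using` alias, as the deleted lines indicate; the `phi::DenseTensor` here is a stub standing in for the real Paddle class):

```cpp
// A "using" alias introduces no new type, so rewriting every LoDTensor
// to phi::DenseTensor is a purely textual, behavior-preserving change.
#include <type_traits>

namespace phi {
struct DenseTensor {};
}  // namespace phi

using LoDTensor = phi::DenseTensor;  // the alias this patch series deletes

static_assert(std::is_same<LoDTensor, phi::DenseTensor>::value,
              "LoDTensor and phi::DenseTensor are one and the same type");

int main() { return 0; }
```

The LoD metadata accessors (`lod()`, `set_lod()`, `mutable_lod()`) are members of `phi::DenseTensor` itself, which is why call sites such as `out->mutable_lod()` above need no change.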
diff --git a/paddle/fluid/operators/assert_op.cc b/paddle/fluid/operators/assert_op.cc
index b65a709291f4e6f7c6072b1e555985e9175d6c48..b1ca48320016c1a272c6872dc9fc01d3d7912a86 100644
--- a/paddle/fluid/operators/assert_op.cc
+++ b/paddle/fluid/operators/assert_op.cc
@@ -41,8 +41,6 @@ const char kSummarize[] = "summarize";
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
-
 class AssertOp : public framework::OperatorBase {
  public:
   AssertOp(const std::string &type,
@@ -58,7 +56,7 @@ class AssertOp : public framework::OperatorBase {
     PADDLE_ENFORCE_NOT_NULL(cond_var_ptr,
                             platform::errors::NotFound(
                                 "Input(Condition) of AssertOp is not found."));
-    const LoDTensor &cond = cond_var_ptr->Get<LoDTensor>();
+    const phi::DenseTensor &cond = cond_var_ptr->Get<phi::DenseTensor>();
     PADDLE_ENFORCE_EQ(
         cond.dims(),
         phi::make_ddim({1}),
@@ -78,7 +76,7 @@ class AssertOp : public framework::OperatorBase {
     const std::vector<std::string> &x_names = Inputs(kData);
     for (const std::string &name : x_names) {
       const framework::Variable *x_var_ptr = scope.FindVar(name);
-      const phi::DenseTensor &x_tensor = x_var_ptr->Get<LoDTensor>();
+      const phi::DenseTensor &x_tensor = x_var_ptr->Get<phi::DenseTensor>();
       formatter.Print(x_tensor, name);
     }
diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 91bc5019f3f079df6c12ea643027256e03eaa1bb..1af424fa77dbe63949621fa21796bba09c804ac7 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -79,16 +79,19 @@ class AssignInferVarType : public framework::VarTypeInference {
 class AssignOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X",
-             "(LoDTensor, SelectedRows or LoDTensorArray) The input variable "
-             "could be LoDTensor, SelectedRows or LoDTensorArray.")
+    AddInput(
+        "X",
+        "(phi::DenseTensor, SelectedRows or phi::DenseTensorArray) The input "
+        "variable "
+        "could be phi::DenseTensor, SelectedRows or phi::DenseTensorArray.")
         .AsDispensable();
     AddOutput("Out",
-              "(LoDTensor, SelectedRows or LoDTensorArray) The type of output "
+              "(phi::DenseTensor, SelectedRows or phi::DenseTensorArray) The "
+              "type of output "
               "is the same as input X.");
     AddComment(R"DOC(Assign Operator
 
-Out = X, when type in [LoDTensor/SelectedRows/LoDTensorArray]
+Out = X, when type in [phi::DenseTensor/SelectedRows/phi::DenseTensorArray]
 raise error if the type is not listed above.
 )DOC");
   }
diff --git a/paddle/fluid/operators/assign_pos_op.cu b/paddle/fluid/operators/assign_pos_op.cu
index 0f1107765d384459a3135a8fb6049721d1d88f5d..e5f783ec2d6ac9e3211cd69966fb47695581fcce 100644
--- a/paddle/fluid/operators/assign_pos_op.cu
+++ b/paddle/fluid/operators/assign_pos_op.cu
@@ -59,13 +59,14 @@ class AssignPosCUDAKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     // assign pos decides which tokens should be fetched belong to specially
     // counter orderingly.
-    auto cum_count = context.Input<LoDTensor>(
+    auto cum_count = context.Input<phi::DenseTensor>(
         "cum_count");  // (counter number) int32 | int64
-    auto numbers =
-        context.Input<LoDTensor>("X");  // (batch_size * seq_len, topk) int32
+    auto numbers = context.Input<phi::DenseTensor>(
+        "X");  // (batch_size * seq_len, topk) int32
     auto eff_num_len =
-        context.Input<LoDTensor>("eff_num_len");  // (sum(cum_count))
-    auto out = context.Output<LoDTensor>("Out");  // (cum_count) value ranges
+        context.Input<phi::DenseTensor>("eff_num_len");  // (sum(cum_count))
+    auto out =
+        context.Output<phi::DenseTensor>("Out");  // (cum_count) value ranges
                                                   // from 0 to batch_size *
                                                   // seq_len * topk
     auto place = context.GetPlace();
diff --git a/paddle/fluid/operators/assign_pos_op.h b/paddle/fluid/operators/assign_pos_op.h
index 49e95184e346fb79ec45ed8e37e593738a0a3f7c..6c75fb55f58468f0bf9f9ddbadbab45ee7cd058f 100644
--- a/paddle/fluid/operators/assign_pos_op.h
+++ b/paddle/fluid/operators/assign_pos_op.h
@@ -20,8 +20,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
-
 template <typename T>
 class AssignPosOpCPUKernel : public framework::OpKernel<T> {
  public:
diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc
index 49a847eecaeaaf0fdbbb68c410020db76ec76cfb..d3ae66b3c02ff83c3b1a941f297f5d2db425953f 100644
--- a/paddle/fluid/operators/attention_lstm_op.cc
+++ b/paddle/fluid/operators/attention_lstm_op.cc
@@ -205,11 +205,12 @@ framework::OpKernelType AttentionLSTMOp::GetExpectedKernelType(
 }
 
 void AttentionLSTMOpMaker::Make() {
-  AddInput("X",
-           "(LoDTensor) the input is a LodTensor, which support "
-           "variable-time length input sequence. The underlying tensor in "
-           "this LoDTensor is a matrix with shape (T X M), where T is the "
-           "total time steps in this mini-batch, M is the dim size of x.");
+  AddInput(
+      "X",
+      "(phi::DenseTensor) the input is a LodTensor, which support "
+      "variable-time length input sequence. The underlying tensor in "
+      "this phi::DenseTensor is a matrix with shape (T X M), where T is the "
+      "total time steps in this mini-batch, M is the dim size of x.");
   AddInput("C0",
            "(Tensor) LSTM C0"
            "This is a tensor with shape (N x D), where N is the batch size, D "
@@ -247,12 +248,14 @@ void AttentionLSTMOpMaker::Make() {
            "Note: we should add the bias of hidden and context accorindg to "
            "the same gate: "
            "{B_forget, B_input, B_output, B_cell}");
-  AddOutput("Hidden",
-            "(LoDTensor) (same as LSTMOp) the hidden state of LSTM operator. "
-            "The shape is (T x D), and lod is the same with the `Input`.");
-  AddOutput("Cell",
-            "(LoDTensor) (same as LSTMOp) the cell state of LSTM operator. "
-            "The shape is (T x D), and lod is the same with the `Input`.");
+  AddOutput(
+      "Hidden",
+      "(phi::DenseTensor) (same as LSTMOp) the hidden state of LSTM operator. "
+      "The shape is (T x D), and lod is the same with the `Input`.");
+  AddOutput(
+      "Cell",
+      "(phi::DenseTensor) (same as LSTMOp) the cell state of LSTM operator. "
+      "The shape is (T x D), and lod is the same with the `Input`.");
   AddOutput("AttentionedX",
             "(Tensor) shape is (T x 1), the result after X * AttentionWeight,"
             " where T is the total time steps in this mini-batch,"
@@ -339,7 +342,7 @@ class AttentionLSTMKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     using DeviceContext = phi::CPUContext;
 
-    auto* x = ctx.Input<LoDTensor>("X");
+    auto* x = ctx.Input<phi::DenseTensor>("X");
     auto* h0 = ctx.Input<phi::DenseTensor>("H0");
     auto* c0 = ctx.Input<phi::DenseTensor>("C0");
     auto* atten_w = ctx.Input<phi::DenseTensor>("AttentionWeight");
@@ -350,8 +353,8 @@ class AttentionLSTMKernel : public framework::OpKernel<T> {
     auto* lstm_w = ctx.Input<phi::DenseTensor>("LSTMWeight");
     auto* lstm_b = ctx.Input<phi::DenseTensor>("LSTMBias");
 
-    auto* hidden_out = ctx.Output<LoDTensor>("Hidden");
-    auto* cell_out = ctx.Output<LoDTensor>("Cell");
+    auto* hidden_out = ctx.Output<phi::DenseTensor>("Hidden");
+    auto* cell_out = ctx.Output<phi::DenseTensor>("Cell");
     auto* atted_x = ctx.Output<phi::DenseTensor>("AttentionedX");
     auto* fc_out = ctx.Output<phi::DenseTensor>("AttentionFCOut");
     auto* lstm_x = ctx.Output<phi::DenseTensor>("LSTMX");
diff --git a/paddle/fluid/operators/attention_lstm_op.h b/paddle/fluid/operators/attention_lstm_op.h
index 32511b97d6a561ca5b12acbd20e96b55e3d4c17c..41d7d594df207381b56d6f618dc47e232fd03107 100644
--- a/paddle/fluid/operators/attention_lstm_op.h
+++ b/paddle/fluid/operators/attention_lstm_op.h
@@ -18,7 +18,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
 using Tensor = phi::DenseTensor;
 
 class AttentionLSTMOp : public framework::OperatorWithKernel {
diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index a20b2ad21d3e9cb99df1f804ec3cab7775c6ba45..abf177ee9f9f46dee97e4ef3b30fd8e0c8fa6b5d 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -383,8 +383,8 @@ framework::OpKernelType BatchNormGradOp::GetExpectedKernelType(
     const Tensor *t = nullptr;
     if (var->IsType<Tensor>()) {
       t = &var->Get<Tensor>();
-    } else if (var->IsType<LoDTensor>()) {
-      t = &var->Get<LoDTensor>();
+    } else if (var->IsType<phi::DenseTensor>()) {
+      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(
@@ -525,8 +525,8 @@ framework::OpKernelType BatchNormDoubleGradOp::GetExpectedKernelType(
     const Tensor *t = nullptr;
     if (var->IsType<Tensor>()) {
       t = &var->Get<Tensor>();
-    } else if (var->IsType<LoDTensor>()) {
-      t = &var->Get<LoDTensor>();
+    } else if (var->IsType<phi::DenseTensor>()) {
+      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(
diff --git a/paddle/fluid/operators/batch_norm_op.h b/paddle/fluid/operators/batch_norm_op.h
index 35f1572899f8578377648352a1780f785af4120d..b11deeb49509b0bc89e80484a7db92709e0ecc51 100644
--- a/paddle/fluid/operators/batch_norm_op.h
+++ b/paddle/fluid/operators/batch_norm_op.h
@@ -28,7 +28,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using DataLayout = phi::DataLayout;
 
 template <typename T>
diff --git a/paddle/fluid/operators/beam_search_decode_op.h b/paddle/fluid/operators/beam_search_decode_op.h
index 7bad6950a7a260469b713fc3cab93714444c1608..e635405f3884eaea398b1621c6f284e86a5cc4c6 100644
--- a/paddle/fluid/operators/beam_search_decode_op.h
+++ b/paddle/fluid/operators/beam_search_decode_op.h
@@ -23,8 +23,8 @@ namespace operators {
 struct BeamSearchDecodeFunctor {
   BeamSearchDecodeFunctor(const LoDTensorArray& step_ids,
                           const LoDTensorArray& step_scores,
-                          LoDTensor* id_tensor,
-                          LoDTensor* score_tensor,
+                          phi::DenseTensor* id_tensor,
+                          phi::DenseTensor* score_tensor,
                           size_t beam_size,
                           int end_id)
       : beam_size_(beam_size),
@@ -119,8 +119,8 @@ struct BeamSearchDecodeFunctor {
   const LoDTensorArray& step_scores_origin_;
   LoDTensorArray step_ids_ = LoDTensorArray();
   LoDTensorArray step_scores_ = LoDTensorArray();
-  LoDTensor* id_tensor_;
-  LoDTensor* score_tensor_;
+  phi::DenseTensor* id_tensor_;
+  phi::DenseTensor* score_tensor_;
 };
 
 template <typename T>
@@ -164,8 +164,10 @@ class BeamSearchDecodeOpKernel : public framework::OpKernel<T> {
     int end_id = context.Attr<int>("end_id");
 
     // prepare output
-    LoDTensor* sentenceIds = context.Output<LoDTensor>("SentenceIds");
-    LoDTensor* sentenceScores = context.Output<LoDTensor>("SentenceScores");
+    phi::DenseTensor* sentenceIds =
+        context.Output<phi::DenseTensor>("SentenceIds");
+    phi::DenseTensor* sentenceScores =
+        context.Output<phi::DenseTensor>("SentenceScores");
 
     BeamSearchDecodeFunctor bs(
         *ids, *scores, sentenceIds, sentenceScores, beam_size, end_id);
diff --git a/paddle/fluid/operators/beam_search_decode_op_def.h b/paddle/fluid/operators/beam_search_decode_op_def.h
index c9e89a7f354c4bcd73c7ee2cc801aa8b8d0c5340..e57dfe512c27a574f4f153609bc7e479fa6dd2bf 100644
--- a/paddle/fluid/operators/beam_search_decode_op_def.h
+++ b/paddle/fluid/operators/beam_search_decode_op_def.h
@@ -23,7 +23,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
 using LoDTensorArray = framework::LoDTensorArray;
 
 // all the lod have 2 levels.
@@ -54,15 +53,15 @@ struct BeamSearchDecoder {
    * with word score.
    * Param:
    *  sentence_vector_list: sentence_vector for each source sentence.
-   *  id_tensor: result LoDTensor for sentences of id.
-   *  score_tensor: result LoDTensor for sentences of score.
+   *  id_tensor: result phi::DenseTensor for sentences of id.
+   *  score_tensor: result phi::DenseTensor for sentences of score.
    *  reverse: whether ids of sentence in sentence_vector_list is reversed
    *  sort_by_score: whether to sort hypotheses of each sentence by scores.
    */
  void ConvertSentenceVectorToLodTensor(
      std::vector<SentenceVector<T>> sentence_vector_list,
-      LoDTensor* id_tensor,
-      LoDTensor* score_tensor,
+      phi::DenseTensor* id_tensor,
+      phi::DenseTensor* score_tensor,
      bool reverse = true,
      bool sort_by_score = true) const;
 
@@ -72,8 +71,8 @@ struct BeamSearchDecoder {
   */
  void Backtrace(const LoDTensorArray& step_ids,
                 const LoDTensorArray& step_scores,
-                 LoDTensor* id_tensor,
-                 LoDTensor* score_tensor) const;
+                 phi::DenseTensor* id_tensor,
+                 phi::DenseTensor* score_tensor) const;
 
   size_t beam_size_;
   int end_id_;
@@ -82,8 +81,8 @@ struct BeamSearchDecoder {
 template <typename T>
 void BeamSearchDecoder<T>::ConvertSentenceVectorToLodTensor(
     std::vector<SentenceVector<T>> sentence_vector_list,
-    LoDTensor* id_tensor,
-    LoDTensor* score_tensor,
+    phi::DenseTensor* id_tensor,
+    phi::DenseTensor* score_tensor,
     bool reverse,
     bool sort_by_score) const {
   size_t src_num = sentence_vector_list.size();
@@ -158,8 +157,8 @@ void BeamSearchDecoder<T>::ConvertSentenceVectorToLodTensor(
 template <typename T>
 void BeamSearchDecoder<T>::Backtrace(const LoDTensorArray& step_ids,
                                      const LoDTensorArray& step_scores,
-                                     LoDTensor* id_tensor,
-                                     LoDTensor* score_tensor) const {
+                                     phi::DenseTensor* id_tensor,
+                                     phi::DenseTensor* score_tensor) const {
   PADDLE_ENFORCE_NE(
       step_ids.empty(),
       true,
diff --git a/paddle/fluid/operators/beam_search_decode_op_test.cc b/paddle/fluid/operators/beam_search_decode_op_test.cc
index 03103d98a0a0fd90035e64099ac1d19bcf2ef8a2..72fcf40ec3d58e65d8e6b376b71f1000700f9682 100644
--- a/paddle/fluid/operators/beam_search_decode_op_test.cc
+++ b/paddle/fluid/operators/beam_search_decode_op_test.cc
@@ -18,7 +18,6 @@ limitations under the License. */
 
 using CPUPlace = paddle::platform::CPUPlace;
 using LoD = paddle::framework::LoD;
-using LoDTensor = phi::DenseTensor;
 using LoDTensorArray = paddle::framework::LoDTensorArray;
 
 template <typename T>
@@ -59,7 +58,7 @@ void GenerateExample(const std::vector<size_t>& level_0,
   lod.push_back(level_1);
 
   // Ids
-  LoDTensor tensor_id;
+  phi::DenseTensor tensor_id;
   tensor_id.set_lod(lod);
   tensor_id.Resize({static_cast<int64_t>(data.size())});
   // malloc memory
@@ -69,7 +68,7 @@ void GenerateExample(const std::vector<size_t>& level_0,
   }
 
   // Scores
-  LoDTensor tensor_score;
+  phi::DenseTensor tensor_score;
   tensor_score.set_lod(lod);
   tensor_score.Resize({static_cast<int64_t>(data.size())});
   // malloc memory
@@ -124,8 +123,8 @@ void BeamSearchDecodeTestFrame() {
 
   BeamSearchDecoder<T> helper(2, 1);  // beam_size = 2, end_id = 1
 
-  LoDTensor id_tensor;
-  LoDTensor score_tensor;
+  phi::DenseTensor id_tensor;
+  phi::DenseTensor score_tensor;
   helper.Backtrace(ids, scores, &id_tensor, &score_tensor);
 
   LoD lod = id_tensor.lod();
diff --git a/paddle/fluid/operators/beam_search_decode_op_xpu.cc b/paddle/fluid/operators/beam_search_decode_op_xpu.cc
index cfea2f57da2731774f2385b2290949ca6b1ed616..aa7e6ca0476072ed75f0c4a760cb38505c15cdbd 100644
--- a/paddle/fluid/operators/beam_search_decode_op_xpu.cc
+++ b/paddle/fluid/operators/beam_search_decode_op_xpu.cc
@@ -62,20 +62,21 @@ class BeamSearchDecodeXPUKernel : public framework::OpKernel<T> {
     int end_id = context.Attr<int>("end_id");
 
     // prepare output
-    LoDTensor* sentenceIds = nullptr;
-    LoDTensor* sentenceScores = nullptr;
+    phi::DenseTensor* sentenceIds = nullptr;
+    phi::DenseTensor* sentenceScores = nullptr;
 
-    LoDTensor* sentenceIds_temp = context.Output<LoDTensor>("SentenceIds");
-    LoDTensor* sentenceScores_temp =
-        context.Output<LoDTensor>("SentenceScores");
+    phi::DenseTensor* sentenceIds_temp =
+        context.Output<phi::DenseTensor>("SentenceIds");
+    phi::DenseTensor* sentenceScores_temp =
+        context.Output<phi::DenseTensor>("SentenceScores");
 
     if (platform::is_xpu_place(ids->at(0).place())) {
-      sentenceIds = new LoDTensor();
+      sentenceIds = new phi::DenseTensor();
       sentenceIds->set_lod(sentenceIds_temp->lod());
     }
 
     if (platform::is_xpu_place(ids->at(0).place())) {
-      sentenceScores = new LoDTensor();
+      sentenceScores = new phi::DenseTensor();
       sentenceScores->set_lod(sentenceScores_temp->lod());
     }
diff --git a/paddle/fluid/operators/beam_search_decode_op_xpu.h b/paddle/fluid/operators/beam_search_decode_op_xpu.h
index e528d48d7218e5dfc5bfd6160837c31efb93b7e8..25f109910f779151cba5c54774698d8ef53c690d 100644
--- a/paddle/fluid/operators/beam_search_decode_op_xpu.h
+++ b/paddle/fluid/operators/beam_search_decode_op_xpu.h
@@ -18,7 +18,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-int SetMeta(const LoDTensor& srcTensor, LoDTensor* dstTensor) {
+int SetMeta(const phi::DenseTensor& srcTensor, phi::DenseTensor* dstTensor) {
   if (srcTensor.dtype() == paddle::experimental::DataType::INT32 ||
       srcTensor.dtype() == paddle::experimental::DataType::INT64 ||
       srcTensor.dtype() == paddle::experimental::DataType::FLOAT32 ||
@@ -33,8 +33,8 @@ int SetMeta(const LoDTensor& srcTensor, LoDTensor* dstTensor) {
   return xpu::Error_t::SUCCESS;
 }
 
 template <typename T>
-int CopyTensorByXPU(const LoDTensor& srcTensor,
-                    LoDTensor* dstTensor,
+int CopyTensorByXPU(const phi::DenseTensor& srcTensor,
+                    phi::DenseTensor* dstTensor,
                     int flag,
                     const Place& place) {
   const T* srcData = srcTensor.template data<T>();
@@ -67,8 +67,8 @@ int CopyTensorByXPU(const LoDTensor& srcTensor,
   return xpu::Error_t::SUCCESS;
 }
 
-const int CopyTensorByType(const LoDTensor& srcTensor,
-                           LoDTensor* dstTensor,
+const int CopyTensorByType(const phi::DenseTensor& srcTensor,
+                           phi::DenseTensor* dstTensor,
                            int flag,
                            const Place& place) {
   int r = 0;
@@ -97,8 +97,8 @@ const int CopyTensorByType(const LoDTensor& srcTensor,
 struct BeamSearchDecodeXPUFunctor {
   BeamSearchDecodeXPUFunctor(const LoDTensorArray& step_ids,
                              const LoDTensorArray& step_scores,
-                             LoDTensor* id_tensor,
-                             LoDTensor* score_tensor,
+                             phi::DenseTensor* id_tensor,
+                             phi::DenseTensor* score_tensor,
                              size_t beam_size,
                              int end_id)
       : beam_size_(beam_size),
@@ -164,8 +164,8 @@ struct BeamSearchDecodeXPUFunctor {
   // scenarios.
   LoDTensorArray step_ids_ = LoDTensorArray();
   LoDTensorArray step_scores_ = LoDTensorArray();
-  LoDTensor* id_tensor_;
-  LoDTensor* score_tensor_;
+  phi::DenseTensor* id_tensor_;
+  phi::DenseTensor* score_tensor_;
 };
 
 }  // namespace operators
diff --git a/paddle/fluid/operators/beam_search_decode_op_xpu_test.cc b/paddle/fluid/operators/beam_search_decode_op_xpu_test.cc
index c3bd7d55d3784e93abea9b2a3b0034d8ebd7713c..8ec90efdf71248b84822b280afc752fecc5a846c 100644
--- a/paddle/fluid/operators/beam_search_decode_op_xpu_test.cc
+++ b/paddle/fluid/operators/beam_search_decode_op_xpu_test.cc
@@ -19,7 +19,6 @@ limitations under the License. */
 using CPUPlace = paddle::platform::CPUPlace;
 using XPUPlace = paddle::platform::XPUPlace;
 using LoD = paddle::framework::LoD;
-using LoDTensor = phi::DenseTensor;
 using LoDTensorArray = paddle::framework::LoDTensorArray;
 
 template <typename T>
@@ -67,7 +66,7 @@ void GenerateXPUExample(const std::vector<size_t>& level_0,
   lod.push_back(level_1);
 
   // Ids
-  LoDTensor tensor_id_cpu;
+  phi::DenseTensor tensor_id_cpu;
   tensor_id_cpu.set_lod(lod);
   tensor_id_cpu.Resize({static_cast<int64_t>(data.size())});
   // malloc memory
@@ -76,7 +75,7 @@ void GenerateXPUExample(const std::vector<size_t>& level_0,
     id_cpu_ptr[i] = static_cast<int64_t>(data.at(i));
   }
 
-  LoDTensor tensor_id;
+  phi::DenseTensor tensor_id;
   const phi::DenseTensorMeta meta_data_id(paddle::experimental::DataType::INT64,
                                           tensor_id_cpu.dims());
   tensor_id.set_meta(meta_data_id);
@@ -90,7 +89,7 @@ void GenerateXPUExample(const std::vector<size_t>& level_0,
                        tensor_id_cpu.numel() * sizeof(int64_t));
 
   // Scores
-  LoDTensor tensor_score_cpu;
+  phi::DenseTensor tensor_score_cpu;
   tensor_score_cpu.set_lod(lod);
   tensor_score_cpu.Resize({static_cast<int64_t>(data.size())});
   // malloc memory
@@ -99,7 +98,7 @@ void GenerateXPUExample(const std::vector<size_t>& level_0,
     score_cpu_ptr[i] = static_cast<T>(data.at(i));
   }
 
-  LoDTensor tensor_score;
+  phi::DenseTensor tensor_score;
 
   if (std::is_same<float, T>::value) {
     const phi::DenseTensorMeta meta_data_score(
@@ -178,8 +177,8 @@ void BeamSearchDecodeTestByXPUFrame() {
   ASSERT_EQ(ids.size(), 5UL);
   ASSERT_EQ(scores.size(), 5UL);
 
-  LoDTensor id_tensor_cpu;
-  LoDTensor score_tensor_cpu;
+  phi::DenseTensor id_tensor_cpu;
+  phi::DenseTensor score_tensor_cpu;
 
   paddle::operators::BeamSearchDecodeXPUFunctor bs_xpu(
       ids, scores, &id_tensor_cpu, &score_tensor_cpu, 2, 1);
diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc
index 668896c791f3c1ca9784cf9aa2c01d90d78a59ae..49669f1b350d9fd5a631a490248ec7654212a60d 100644
--- a/paddle/fluid/operators/beam_search_op.cc
+++ b/paddle/fluid/operators/beam_search_op.cc
@@ -27,37 +27,42 @@ class BeamSearchOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     // inputs and outputs stored in proto
     AddInput("pre_ids",
-             "(LoDTensor) The LoDTensor containing the selected ids at the "
+             "(phi::DenseTensor) The phi::DenseTensor containing the selected "
+             "ids at the "
              "previous step. It should be a tensor with shape (batch_size, 1) "
             "and lod `[[0, 1, ... , batch_size], [0, 1, ..., batch_size]]` at "
             "the first step.");
-    AddInput("pre_scores",
-             "(LoDTensor) The LoDTensor containing the accumulated "
-             "scores corresponding to the selected ids at the previous step.");
+    AddInput(
+        "pre_scores",
+        "(phi::DenseTensor) The phi::DenseTensor containing the accumulated "
+        "scores corresponding to the selected ids at the previous step.");
     AddInput("ids",
-             "(LoDTensor) The LoDTensor containing the candidates ids. Its "
+             "(phi::DenseTensor) The phi::DenseTensor containing the "
+             "candidates ids. Its "
             "shape should be (batch_size * beam_size, W). If not set, it will "
             "be calculated out according to Input(scores) in this operator.")
        .AsDispensable();
-    AddInput("scores",
-             "(LoDTensor) The LoDTensor containing the current scores "
-             "corresponding to Input(ids). If Input(ids) is not nullptr, its "
-             "shape is the same as that of Input(ids)."
-             "If is_accumulated is true, Input(scores) is accumulated scores "
-             "and will be used derectedly. Else, each score will be "
-             "transformed to the log field and accumulate Input(pre_sores) "
-             "first.");
+    AddInput(
+        "scores",
+        "(phi::DenseTensor) The phi::DenseTensor containing the current scores "
+        "corresponding to Input(ids). If Input(ids) is not nullptr, its "
+        "shape is the same as that of Input(ids)."
+        "If is_accumulated is true, Input(scores) is accumulated scores "
+        "and will be used derectedly. Else, each score will be "
+        "transformed to the log field and accumulate Input(pre_sores) "
+        "first.");
     AddOutput("selected_ids",
               "A LodTensor that stores the IDs selected by beam search.");
-    AddOutput("selected_scores",
-              "A LoDTensor containing the accumulated scores corresponding to "
-              "Output(selected_ids).");
+    AddOutput(
+        "selected_scores",
+        "A phi::DenseTensor containing the accumulated scores corresponding to "
+        "Output(selected_ids).");
     AddOutput("parent_idx",
               "A Tensor preserving the selected_ids' parent index in pre_ids.")
         .AsDispensable();
 
     // Attributes stored in AttributeMap
-    AddAttr<int>("level", "the level of LoDTensor");
+    AddAttr<int>("level", "the level of phi::DenseTensor");
     AddAttr<int>("beam_size", "beam size for beam search");
     AddAttr<int>("end_id",
                  "the token id which indicates the end of a sequence");
diff --git a/paddle/fluid/operators/broadcast_tensors_op.cc b/paddle/fluid/operators/broadcast_tensors_op.cc
index df91ef10b181ab15729cce3edc444cf0091c67ba..34a76e86aae0d87008f5026092b59600bb9dff69 100644
--- a/paddle/fluid/operators/broadcast_tensors_op.cc
+++ b/paddle/fluid/operators/broadcast_tensors_op.cc
@@ -41,12 +41,13 @@ class BroadcastTensorsOp : public framework::OperatorWithKernel {
 class BroadcastTensorsOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X",
-             "A Varaible list. The shape and data type of the list elements"
-             "should be consistent. Variable can be multi-dimensional Tensor"
-             "or LoDTensor, and data types can be: bool, float16, float32, "
-             "float64, int32, "
-             "int64.")
+    AddInput(
+        "X",
+        "A Varaible list. The shape and data type of the list elements"
+        "should be consistent. Variable can be multi-dimensional Tensor"
+        "or phi::DenseTensor, and data types can be: bool, float16, float32, "
+        "float64, int32, "
+        "int64.")
         .AsDuplicable();
     AddOutput("Out",
               "the sum of input :code:`x`. its shape and data types are "
@@ -54,7 +55,7 @@ class BroadcastTensorsOpMaker : public framework::OpProtoAndCheckerMaker {
         .AsDuplicable();
     AddComment(
         R"DOC(This OP is used to broadcast a vector of inputs
-                     with Tensor or LoDTensor type, following broadcast semantics.)DOC");
+                     with phi::DenseTensor type, following broadcast semantics.)DOC");
   }
 };
diff --git a/paddle/fluid/operators/check_memory_continue_op.cc b/paddle/fluid/operators/check_memory_continue_op.cc
index aca6951c87e81886c7c6aef65c5bf9ff28892435..0099dd109cabca98af1549f91daa5f64a4458135 100644
--- a/paddle/fluid/operators/check_memory_continue_op.cc
+++ b/paddle/fluid/operators/check_memory_continue_op.cc
@@ -31,12 +31,13 @@ class CheckMemoryContinueOp : public framework::OperatorWithKernel {
 class CheckMemoryContinueOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "(vector<LoDTensor>) The input tensors.").AsDuplicable();
-    AddOutput("Out", "(LoDTensor) The output tensor.").AsDuplicable();
-    AddOutput(
-        "XOut",
-        "(vector<LoDTensor>) The output tensors which are the same as x. It is "
-        "used to build the graph dependency");
+    AddInput("X", "(vector<phi::DenseTensor>) The input tensors.")
+        .AsDuplicable();
+    AddOutput("Out", "(phi::DenseTensor) The output tensor.").AsDuplicable();
+    AddOutput("XOut",
+              "(vector<phi::DenseTensor>) The output tensors which are the "
+              "same as x. It is "
+              "used to build the graph dependency");
     AddComment(R"DOC(
 CheckMemoryContinue Operator.
diff --git a/paddle/fluid/operators/chunk_eval_op.h b/paddle/fluid/operators/chunk_eval_op.h
index 5422d4466188b7a1ba58660533838387ad36e549..7e614ccee7f566076869fb87248e85994b6b30a9 100644
--- a/paddle/fluid/operators/chunk_eval_op.h
+++ b/paddle/fluid/operators/chunk_eval_op.h
@@ -23,8 +23,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
-
 template <typename T>
 class ChunkEvalKernel : public framework::OpKernel<T> {
  public:
@@ -187,9 +185,9 @@ class ChunkEvalKernel : public framework::OpKernel<T> {
         context.Attr<std::vector<int>>("excluded_chunk_types").begin(),
         context.Attr<std::vector<int>>("excluded_chunk_types").end());
 
-    auto* inference = context.Input<LoDTensor>("Inference");
+    auto* inference = context.Input<phi::DenseTensor>("Inference");
     auto place = inference->place();
-    auto* label = context.Input<LoDTensor>("Label");
+    auto* label = context.Input<phi::DenseTensor>("Label");
     auto* precision = context.Output<phi::DenseTensor>("Precision");
     auto* recall = context.Output<phi::DenseTensor>("Recall");
     auto* f1 = context.Output<phi::DenseTensor>("F1-Score");
diff --git a/paddle/fluid/operators/coalesce_tensor_op.cc b/paddle/fluid/operators/coalesce_tensor_op.cc
index 719b5c057b602e22c377f3a6e0b48982d086388c..e148c5b4b10e5fd01cd941cb7105c5f07aab342f 100644
--- a/paddle/fluid/operators/coalesce_tensor_op.cc
+++ b/paddle/fluid/operators/coalesce_tensor_op.cc
@@ -120,7 +120,7 @@ class CoalesceTensorOpKernel : public framework::OpKernel<T> {
             in_var_names.size(),
             out_var_names.size()));
 
-    // Input & Output check: only support LoDTensor
+    // Input & Output check: only support phi::DenseTensor
     bool has_not_init_in_vars = false;
     for (size_t i = 0; i < in_tensors.size(); ++i) {
       PADDLE_ENFORCE_NOT_NULL(
@@ -426,17 +426,17 @@ class CoalesceTensorOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("Input",
-             "(vector<LoDTensor>) The input tensors of"
+             "(vector<phi::DenseTensor>) The input tensors of"
             " coalesce_tensor operator.")
        .AsDuplicable();
     AddOutput("Output",
-              "(vector<LoDTensor>) The output "
+              "(vector<phi::DenseTensor>) The output "
              "tensors of coalesce_tensor operator. And the address "
             "of output tensors are continuous, they are sliced from the "
             "tensor of FusedOutput.")
        .AsDuplicable();
     AddOutput("FusedOutput",
-              "(LoDTensor) The output tensor "
+              "(phi::DenseTensor) The output tensor "
              "of coalesce_tensor operator. And the tensors of"
             " Output is sliced from the tensor of FusedOutput.");
     AddAttr<int>("dtype", "The output data type.");
diff --git a/paddle/fluid/operators/common_infer_shape_functions.cc b/paddle/fluid/operators/common_infer_shape_functions.cc
index b256d94a5a894f6cf126c7821da356b5341ab839..fcb58dcb242270dc0425227c06f97905c1cc3cc0 100644
--- a/paddle/fluid/operators/common_infer_shape_functions.cc
+++ b/paddle/fluid/operators/common_infer_shape_functions.cc
@@ -154,7 +154,7 @@ void BinaryOpBroadcastInferShape(framework::InferShapeContext *ctx) {
       ctx->GetInputsVarType(y_name).front(),
       framework::proto::VarType::LOD_TENSOR,
       platform::errors::InvalidArgument(
-          "The var type of input %s should be LoDTensor, but got %s.",
+          "The var type of input %s should be phi::DenseTensor, but got %s.",
          ctx->Inputs(y_name).front(),
          ctx->GetInputsVarType(y_name).front()));
diff --git a/paddle/fluid/operators/copy_cross_scope_op.cc b/paddle/fluid/operators/copy_cross_scope_op.cc
index 1fcf6832d25c4b9ad97631f0fb08944d77b13541..a36e9b73639ba3685c094e95d026d007050e9ceb 100644
--- a/paddle/fluid/operators/copy_cross_scope_op.cc
+++ b/paddle/fluid/operators/copy_cross_scope_op.cc
@@ -30,7 +30,6 @@ class OpBase;
 }  // namespace imperative
 }  // namespace paddle
 
-using LoDTensor = phi::DenseTensor;
 using Tensor = phi::DenseTensor;
 
 namespace paddle {
@@ -64,7 +63,7 @@ class CopyCrossScopeOp : public framework::OperatorBase {
     PADDLE_ENFORCE_NOT_NULL(
         id_var,
         platform::errors::NotFound("No variable with name %s found.", id_name));
-    auto id_tensor = id_var->GetMutable<LoDTensor>();
+    auto id_tensor = id_var->GetMutable<phi::DenseTensor>();
     auto it = scope.kids().begin();
     phi::DenseTensor cpu_id_tensor;
     paddle::framework::TensorCopySync(
@@ -88,8 +87,8 @@ class CopyCrossScopeOp : public framework::OperatorBase {
           platform::errors::NotFound(
               "No variable with name %s found in destination scope.", x_name));
-      auto dst_tensor = dst_var->GetMutable<LoDTensor>();
-      auto main_tensor = main_var->GetMutable<LoDTensor>();
+      auto dst_tensor = dst_var->GetMutable<phi::DenseTensor>();
+      auto main_tensor = main_var->GetMutable<phi::DenseTensor>();
       paddle::framework::TensorCopySync(
           *dst_tensor, main_tensor->place(), main_tensor);
     }
@@ -109,8 +108,8 @@ class CopyCrossScopeOp : public framework::OperatorBase {
         dst_var,
         platform::errors::NotFound(
            "No variable with name %s found in destination scope.", x_name));
-    auto src_tensor = source_var->GetMutable<LoDTensor>();
-    auto dst_tensor = dst_var->GetMutable<LoDTensor>();
+    auto src_tensor = source_var->GetMutable<phi::DenseTensor>();
+    auto dst_tensor = dst_var->GetMutable<phi::DenseTensor>();
     paddle::framework::TensorCopySync(
         *src_tensor, dst_tensor->place(), dst_tensor);
 
@@ -120,7 +119,7 @@ class CopyCrossScopeOp : public framework::OperatorBase {
           main_var,
           platform::errors::NotFound(
               "No variable with name %s found in destination scope.", x_name));
-      auto main_tensor = main_var->GetMutable<LoDTensor>();
+      auto main_tensor = main_var->GetMutable<phi::DenseTensor>();
       paddle::framework::TensorCopySync(
           *dst_tensor, main_tensor->place(), main_tensor);
     }
diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc
index ae1086b623f13b19bceacff422523fe6f4a7eba2..62bd73374b3a189431572441e146546c0d3501ca 100644
--- a/paddle/fluid/operators/crf_decoding_op.cc
+++ b/paddle/fluid/operators/crf_decoding_op.cc
@@ -21,7 +21,8 @@ class CRFDecodingOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput(
         "Emission",
-        "(Tensor/LoDTensor). For a LoDTensor input, its shape is [N x D] "
+        "(Tensor/phi::DenseTensor). For a phi::DenseTensor input, its shape is "
+        "[N x D] "
         "where N is the total sequence length of the mini-batch and D is "
         "the total tag number. While for a tensor input, its shape is "
        "[B X S X D] with B the batch size and S the sequence length of each "
@@ -39,14 +40,14 @@ class CRFDecodingOpMaker : public framework::OpProtoAndCheckerMaker {
         "The data type is the same as Input(Emission).");
     AddInput(
         "Label",
-        "(Tensor/LoDTensor). The ground truth with shape "
-        "[N x 1] (for LoDTensor) or [B x S] (for Tensor). This input is "
+        "(phi::DenseTensor). The ground truth with shape "
+        "[N x 1] (for phi::DenseTensor) or [B x S] (for Tensor). This input is "
         "optional. See more details in the operator's comments. The data type "
         "is int64.")
         .AsDispensable();
     AddOutput(
         "ViterbiPath",
-        "(Tensor/LoDTensor). The decoding results. What to "
+        "(phi::DenseTensor). The decoding results. What to "
         "return changes depending on whether the Input(Label) (the ground "
         "truth) is given. See more details in the operator's comment. "
         "The data type is int64.");
diff --git a/paddle/fluid/operators/crf_decoding_op.h b/paddle/fluid/operators/crf_decoding_op.h
index dbce3700e8a64250f091d5306a83cb29c8c0a2ff..f674ce03a984a96e60e85e0c64f52643cec4e0d2 100644
--- a/paddle/fluid/operators/crf_decoding_op.h
+++ b/paddle/fluid/operators/crf_decoding_op.h
@@ -24,15 +24,14 @@ namespace paddle {
 namespace operators {
 
 using framework::LoD;
-using LoDTensor = phi::DenseTensor;
 
 template <typename T>
 class CRFDecodingOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* emission_weights = ctx.Input<LoDTensor>("Emission");
+    auto* emission_weights = ctx.Input<phi::DenseTensor>("Emission");
     auto* transition_weights = ctx.Input<phi::DenseTensor>("Transition");
-    auto* label = ctx.Input<LoDTensor>("Label");
+    auto* label = ctx.Input<phi::DenseTensor>("Label");
     auto* decoded_path = ctx.Output<phi::DenseTensor>("ViterbiPath");
 
     int64_t* path = decoded_path->mutable_data<int64_t>(platform::CPUPlace());
diff --git a/paddle/fluid/operators/ctc_align_op.cu b/paddle/fluid/operators/ctc_align_op.cu
index 2095b3d3858e34dbc97d69910975ec615fc625d7..cef3cf25ff6ff84adda5cf3e0a9ded0275a22d87 100644
--- a/paddle/fluid/operators/ctc_align_op.cu
+++ b/paddle/fluid/operators/ctc_align_op.cu
@@ -85,8 +85,8 @@ class CTCAlignOpCUDAKernel : public framework::OpKernel<T> {
                       platform::errors::InvalidArgument(
                           "CTCAlign operator CUDA kernel must use CUDAPlace "
                           "rather than CPUPlace."));
-    auto* input = ctx.Input<LoDTensor>("Input");
-    auto* output = ctx.Output<LoDTensor>("Output");
+    auto* input = ctx.Input<phi::DenseTensor>("Input");
+    auto* output = ctx.Output<phi::DenseTensor>("Output");
     const int blank = ctx.Attr<int>("blank");
     const int merge_repeated =
         static_cast<int>(ctx.Attr<bool>("merge_repeated"));
@@ -99,9 +99,9 @@ class CTCAlignOpCUDAKernel : public framework::OpKernel<T> {
       auto input_dims = input->dims();
       T* output_data =
          output->mutable_data<T>({input_dims[0], input_dims[1]}, ctx.GetPlace());
-      auto* input_length = ctx.Input<LoDTensor>("InputLength");
+      auto* input_length = ctx.Input<phi::DenseTensor>("InputLength");
       const T* input_length_data = input_length->data<T>();
-      auto* output_length = ctx.Output<LoDTensor>("OutputLength");
+      auto* output_length = ctx.Output<phi::DenseTensor>("OutputLength");
       T* output_length_data =
          output_length->mutable_data<T>({input_dims[0], 1}, ctx.GetPlace());
       PaddingMergeAndDelCudaKernel<T>
diff --git a/paddle/fluid/operators/ctc_align_op.h b/paddle/fluid/operators/ctc_align_op.h
index e137170d99a4a7f68305adf94cfa41827b6049b1..9279cf531d449cdb2fa4a1ac6a9da3df753ce764 100644
--- a/paddle/fluid/operators/ctc_align_op.h
+++ b/paddle/fluid/operators/ctc_align_op.h
@@ -25,14 +25,13 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 
 template <typename T>
 class CTCAlignKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* input = ctx.Input<LoDTensor>("Input");
-    auto* output = ctx.Output<LoDTensor>("Output");
+    auto* input = ctx.Input<phi::DenseTensor>("Input");
+    auto* output = ctx.Output<phi::DenseTensor>("Output");
     size_t blank = static_cast<size_t>(ctx.Attr<int>("blank"));
     bool merge_repeated = ctx.Attr<bool>("merge_repeated");
     T* output_data = output->mutable_data<T>(ctx.GetPlace());
@@ -43,10 +42,10 @@ class CTCAlignKernel : public framework::OpKernel<T> {
     if (input->lod().empty()) {
       size_t padding_value =
           static_cast<size_t>(ctx.Attr<int>("padding_value"));
-      auto* input_length = ctx.Input<LoDTensor>("InputLength");
+      auto* input_length = ctx.Input<phi::DenseTensor>("InputLength");
       const T* input_length_data = input_length->data<T>();
 
-      auto* output_length = ctx.Output<LoDTensor>("OutputLength");
+      auto* output_length = ctx.Output<phi::DenseTensor>("OutputLength");
       T* output_length_data = output_length->mutable_data<T>(ctx.GetPlace());
 
       for (size_t batch_id = 0; batch_id < (unsigned)input_dims[0];
diff --git a/paddle/fluid/operators/cudnn_lstm_op.cu.cc b/paddle/fluid/operators/cudnn_lstm_op.cu.cc
index 399bc5bb0bb261d7b3c5e149be0355cc110e9e61..d436a4b5d531d2e1f706c1d1c5ad4227d263e983 100644
--- a/paddle/fluid/operators/cudnn_lstm_op.cu.cc
+++ b/paddle/fluid/operators/cudnn_lstm_op.cu.cc
@@ -26,7 +26,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
 using Tensor = phi::DenseTensor;
 
 template <typename T>
diff --git a/paddle/fluid/operators/cvm_op.cu b/paddle/fluid/operators/cvm_op.cu
index e8fdcec36082a19b6f4950aaee216339eff14ce2..5cac5392f4abb846dbc6472750e8fb5fbd71f900 100644
--- a/paddle/fluid/operators/cvm_op.cu
+++ b/paddle/fluid/operators/cvm_op.cu
@@ -23,7 +23,6 @@ namespace operators {
 
 using phi::PADDLE_CUDA_NUM_THREADS;
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 
 template <typename T>
 __global__ void CvmComputeKernel(const bool use_cvm,
@@ -87,7 +86,7 @@ template <typename T>
 class CVMCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    const auto* x = context.Input<LoDTensor>("X");
+    const auto* x = context.Input<phi::DenseTensor>("X");
     const T* x_data = x->data<T>();
 
     auto batch_size = x->dims()[0];
@@ -95,7 +94,7 @@ class CVMCUDAKernel : public framework::OpKernel<T> {
     auto item_size = numel / batch_size;
     auto use_cvm = context.Attr<bool>("use_cvm");
 
-    auto* y = context.Output<LoDTensor>("Y");
+    auto* y = context.Output<phi::DenseTensor>("Y");
     T* y_data = y->mutable_data<T>(context.GetPlace());
 
     // for Input X do not have Lod Information.
@@ -128,7 +127,7 @@ template <typename T>
 class CVMGradCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
+    auto* dx = context.Output<phi::DenseTensor>(framework::GradVarName("X"));
     T* dx_data = dx->mutable_data<T>(context.GetPlace());
 
     const phi::DenseTensor* cvm = context.Input<phi::DenseTensor>("CVM");
diff --git a/paddle/fluid/operators/cvm_op.h b/paddle/fluid/operators/cvm_op.h
index 355fc6690ce1a4642c9a92e583f19ef67adb0972..9bd5a00b3733fd1254e88248b8173a9137f814d7 100644
--- a/paddle/fluid/operators/cvm_op.h
+++ b/paddle/fluid/operators/cvm_op.h
@@ -20,7 +20,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 
 template <typename T>
 void CvmComputeKernel(const bool use_cvm,
@@ -61,14 +60,14 @@ template <typename T>
 class CVMOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    const auto* x = context.Input<LoDTensor>("X");
+    const auto* x = context.Input<phi::DenseTensor>("X");
     const T* x_data = x->data<T>();
 
     auto batch_size = x->dims()[0];
     auto item_size = x->numel() / batch_size;
     auto use_cvm = context.Attr<bool>("use_cvm");
 
-    auto* y = context.Output<LoDTensor>("Y");
+    auto* y = context.Output<phi::DenseTensor>("Y");
     T* y_data = y->mutable_data<T>(context.GetPlace());
 
     // for Input X do not have Lod Information.
@@ -102,7 +101,7 @@ template <typename T>
 class CVMGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
+    auto* dx = context.Output<phi::DenseTensor>(framework::GradVarName("X"));
     T* dx_data = dx->mutable_data<T>(context.GetPlace());
 
     const phi::DenseTensor* cvm = context.Input<phi::DenseTensor>("CVM");
diff --git a/paddle/fluid/operators/data_norm_op.cc b/paddle/fluid/operators/data_norm_op.cc
index 09d09c33900e8d9eb91422ca2a425cccb178811e..36dc93445df594599d9ebaaa647ed8427f057d75 100644
--- a/paddle/fluid/operators/data_norm_op.cc
+++ b/paddle/fluid/operators/data_norm_op.cc
@@ -24,7 +24,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using DataLayout = phi::DataLayout;
 
 template <typename T>
@@ -487,8 +486,8 @@ class DataNormGradOp : public framework::OperatorWithKernel {
     const Tensor *t = nullptr;
     if (var->IsType<Tensor>()) {
       t = &var->Get<Tensor>();
-    } else if (var->IsType<LoDTensor>()) {
-      t = &var->Get<LoDTensor>();
+    } else if (var->IsType<phi::DenseTensor>()) {
+      t = &var->Get<phi::DenseTensor>();
     }
     if (t == nullptr) {
       PADDLE_THROW(platform::errors::InvalidArgument(
diff --git a/paddle/fluid/operators/data_norm_op.cu b/paddle/fluid/operators/data_norm_op.cu
index 790e55965a9d2aacdd4e45b56ef20eff7102f894..1b895b0c8daa5bfc5cc78de7b7c3d9a8ad32a016 100644
--- a/paddle/fluid/operators/data_norm_op.cu
+++ b/paddle/fluid/operators/data_norm_op.cu
@@ -27,7 +27,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using DataLayout = phi::DataLayout;
 using phi::PADDLE_CUDA_NUM_THREADS;
diff --git a/paddle/fluid/operators/deformable_psroi_pooling_op.cc b/paddle/fluid/operators/deformable_psroi_pooling_op.cc
index bac1bb04bc0dd4e9d0d1325c5a04ee89847ae138..5240116c6a4f88dce97e405c95b85869747890c2 100644
--- a/paddle/fluid/operators/deformable_psroi_pooling_op.cc
+++ b/paddle/fluid/operators/deformable_psroi_pooling_op.cc
@@ -33,9 +33,9 @@ class DeformablePSROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
              "H is height of the feature, and "
              "W is the width of the feature.");
     AddInput("ROIs",
-             "(LoDTensor), "
+             "(phi::DenseTensor), "
              "ROIs (Regions of Interest) to pool over. "
-             "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
+             "ROIs should be a 2-D phi::DenseTensor of shape (num_rois, 4) "
              "given as [[x1, y1, x2, y2], ...]. "
              "(x1, y1) is the top left coordinates, and "
              "(x2, y2) is the bottom right coordinates.");
@@ -149,7 +149,8 @@ class DeformablePSROIPoolOp : public framework::OperatorWithKernel {
         rois_dims.size(),
         2,
         platform::errors::InvalidArgument(
-            "Input(ROIs) should be a 2-D LoDTensor of shape (num_rois, 4) "
+            "Input(ROIs) should be a 2-D phi::DenseTensor of shape (num_rois, "
+            "4) "
             "given as [[ x1, y1, x2, y2], ...]. The rank of Input(ROIs) should "
             "be 2, but received ROIs rank is:%d, ROIs shape is:[%s].",
             rois_dims.size(),
diff --git a/paddle/fluid/operators/deformable_psroi_pooling_op.cu b/paddle/fluid/operators/deformable_psroi_pooling_op.cu
index f1816850317a16a9ddfe54d49ebfc607c910d176..80d248b818b4f9781b84b8acee263e935227fa4b 100644
--- a/paddle/fluid/operators/deformable_psroi_pooling_op.cu
+++ b/paddle/fluid/operators/deformable_psroi_pooling_op.cu
@@ -40,7 +40,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using phi::PADDLE_CUDA_NUM_THREADS;
 
 static inline int GET_BLOCKS(const int N) {
@@ -185,7 +184,7 @@ class DeformablePSROIPoolCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     const phi::DenseTensor* input = ctx.Input<phi::DenseTensor>("Input");
-    const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
+    const phi::DenseTensor* rois = ctx.Input<phi::DenseTensor>("ROIs");
     const phi::DenseTensor* trans = ctx.Input<phi::DenseTensor>("Trans");
     phi::DenseTensor* out = ctx.Output<phi::DenseTensor>("Output");
     out->mutable_data<T>(ctx.GetPlace());
@@ -486,7 +485,7 @@ class DeformablePSROIPoolGradCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     const phi::DenseTensor* input = ctx.Input<phi::DenseTensor>("Input");
-    const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
+    const phi::DenseTensor* rois = ctx.Input<phi::DenseTensor>("ROIs");
     const phi::DenseTensor* trans = ctx.Input<phi::DenseTensor>("Trans");
     const phi::DenseTensor* top_count = ctx.Input<phi::DenseTensor>("TopCount");
     const phi::DenseTensor* output_grad =
diff --git a/paddle/fluid/operators/deformable_psroi_pooling_op.h b/paddle/fluid/operators/deformable_psroi_pooling_op.h
index 7af8c99aa376b94f229bd4428d5af8ce6b636147..231d14e537b547b37d5df8512b722538b70d02f5 100644
--- a/paddle/fluid/operators/deformable_psroi_pooling_op.h
+++ b/paddle/fluid/operators/deformable_psroi_pooling_op.h
@@ -34,7 +34,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 
 template <typename T>
 T bilinear_interp(
@@ -80,7 +79,7 @@ void DeformablePSROIPoolForwardCPUKernel(const int count,
                                          T* top_count,
                                          const int batch_size,
                                          int* roi_batch_id_data,
-                                         const LoDTensor* rois) {
+                                         const phi::DenseTensor* rois) {
   for (int ix = 0; ix < count; ix++) {
     int pw = ix % pooled_width;
     int ph = (ix / pooled_width) % pooled_height;
@@ -174,7 +173,7 @@ class DeformablePSROIPoolCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* input = ctx.Input<phi::DenseTensor>("Input");
-    auto* rois = ctx.Input<LoDTensor>("ROIs");
+    auto* rois = ctx.Input<phi::DenseTensor>("ROIs");
     auto* trans = ctx.Input<phi::DenseTensor>("Trans");
     auto* out = ctx.Output<phi::DenseTensor>("Output");
     out->mutable_data<T>(ctx.GetPlace());
@@ -316,7 +315,7 @@ void DeformablePSROIPoolBackwardAccCPUKernel(const int count,
                                              const int channels_each_class,
                                              const int batch_size,
                                              int* roi_batch_id_data,
-                                             const LoDTensor* rois) {
+                                             const phi::DenseTensor* rois) {
   for (int index = 0; index < count; index++) {
     int pw = index % pooled_width;
     int ph = (index / pooled_width) % pooled_height;
@@ -476,7 +475,7 @@ class DeformablePSROIPoolGradCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* input = ctx.Input<phi::DenseTensor>("Input");
-    auto* rois = ctx.Input<LoDTensor>("ROIs");
+    auto* rois = ctx.Input<phi::DenseTensor>("ROIs");
     auto* trans = ctx.Input<phi::DenseTensor>("Trans");
     auto* top_count = ctx.Input<phi::DenseTensor>("TopCount");
     auto* output_grad =
diff --git a/paddle/fluid/operators/dequeue_op.cc b/paddle/fluid/operators/dequeue_op.cc
index 18216fb04d5cf6d6ade2c6d299768716f9551c00..2e954081ed7409cd5086a2ac47c617ec3d975ae8 100644
--- a/paddle/fluid/operators/dequeue_op.cc
+++ b/paddle/fluid/operators/dequeue_op.cc
@@ -20,7 +20,6 @@
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
-using LoDTensor = phi::DenseTensor;
 using LoDTensorBlockingQueueHolder =
     paddle::operators::reader::LoDTensorBlockingQueueHolder;
 
@@ -59,7 +58,7 @@ class DequeueOp : public framework::OperatorBase {
           out_var,
           platform::errors::NotFound("No variable with name %s found",
                                      out_names[i]));
-      auto* out_tensor = out_var->GetMutable<LoDTensor>();
+      auto* out_tensor = out_var->GetMutable<phi::DenseTensor>();
       PADDLE_ENFORCE_NOT_NULL(
           out_tensor,
           platform::errors::InvalidArgument(
diff --git a/paddle/fluid/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc
index 51fdd4ad1f2ec8d41c89224c3702cfb9e86d6995..5d3cccb3a66174c60205322f9681f0481f375812 100644
--- a/paddle/fluid/operators/detection_map_op.cc
+++ b/paddle/fluid/operators/detection_map_op.cc
@@ -103,7 +103,8 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("DetectRes",
-             "(LoDTensor) A 2-D LoDTensor with shape [M, 6] represents the "
+             "(phi::DenseTensor) A 2-D phi::DenseTensor with shape [M, 6] "
+             "represents the "
              "detections. Each row has 6 values: "
              "[label, confidence, xmin, ymin, xmax, ymax], M is the total "
              "number of detect results in this mini-batch. For each instance, "
@@ -111,7 +112,7 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker {
              "offset is N + 1, if LoD[i + 1] - LoD[i] == 0, means there is "
             "no detected data.");
     AddInput("Label",
-             "(LoDTensor) A 2-D LoDTensor represents the"
+             "(phi::DenseTensor) A 2-D phi::DenseTensor represents the"
              "Labeled ground-truth data. Each row has 6 values: "
              "[label, xmin, ymin, xmax, ymax, is_difficult] or 5 values: "
              "[label, xmin, ymin, xmax, ymax], where N is the total "
@@ -135,14 +136,16 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker {
              "current mini-batch are calculated.")
         .AsDispensable();
     AddInput("TruePos",
-             "(LoDTensor) A 2-D LoDTensor with shape [Ntp, 2], store the "
+             "(phi::DenseTensor) A 2-D phi::DenseTensor with shape [Ntp, 2], "
+             "store the "
              "input true positive example of each class."
              "This input is used to pass the AccumTruePos generated by the "
              "previous mini-batch when the multi mini-batches cumulative "
              "calculation carried out. ")
         .AsDispensable();
     AddInput("FalsePos",
-             "(LoDTensor) A 2-D LoDTensor with shape [Nfp, 2], store the "
+             "(phi::DenseTensor) A 2-D phi::DenseTensor with shape [Nfp, 2], "
+             "store the "
              "input false positive example of each class."
              "This input is used to pass the AccumFalsePos generated by the "
              "previous mini-batch when the multi mini-batches cumulative "
@@ -153,16 +156,18 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker {
               "positive example count of each class. It combines the input "
              "input(PosCount) and the positive example count computed from "
              "input(Detection) and input(Label).");
-    AddOutput("AccumTruePos",
-              "(LoDTensor) A LoDTensor with shape [Ntp', 2], store the "
-              "true positive example of each class. It combines the "
-              "input(TruePos) and the true positive examples computed from "
-              "input(Detection) and input(Label).");
-    AddOutput("AccumFalsePos",
-              "(LoDTensor) A LoDTensor with shape [Nfp', 2], store the "
-              "false positive example of each class. It combines the "
-              "input(FalsePos) and the false positive examples computed from "
-              "input(Detection) and input(Label).");
+    AddOutput(
+        "AccumTruePos",
+        "(phi::DenseTensor) A phi::DenseTensor with shape [Ntp', 2], store the "
+        "true positive example of each class. It combines the "
+        "input(TruePos) and the true positive examples computed from "
+        "input(Detection) and input(Label).");
+    AddOutput(
+        "AccumFalsePos",
+        "(phi::DenseTensor) A phi::DenseTensor with shape [Nfp', 2], store the "
+        "false positive example of each class. It combines the "
+        "input(FalsePos) and the false positive examples computed from "
+        "input(Detection) and input(Label).");
     AddOutput("MAP",
               "(Tensor) A tensor with shape [1], store the mAP evaluate "
               "result of the detection.");
diff --git a/paddle/fluid/operators/edit_distance_op.cc b/paddle/fluid/operators/edit_distance_op.cc
index 70de5a3bb7588ccbca84af43db25911a0cee5716..c4c5db6b50cdabc15c7c4dbd02dc7a85135d9cff 100644
--- a/paddle/fluid/operators/edit_distance_op.cc
+++ b/paddle/fluid/operators/edit_distance_op.cc
@@ -35,11 +35,11 @@ class EditDistanceOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("Hyps",
-             "2-D Tensor, or 2-D LoDTensor with last "
+             "2-D Tensor, or 2-D phi::DenseTensor with last "
              "dimension being 1. "
             "The indices for hypothesis strings.");
     AddInput("Refs",
-             "2-D Tensor, or 2-D LoDTensor with last "
+             "2-D Tensor, or 2-D phi::DenseTensor with last "
              "dimension being 1. "
             "The indices for reference strings.");
     AddInput("HypsLength",
@@ -75,7 +75,7 @@ insertion:
 
 So the edit distance between A and B is 3.
 
-Input(Hyps) is a 2-D Tensor or a 2-D LoDTensor consisting of all the hypothesis strings.
+Input(Hyps) is a 2-D Tensor or a 2-D phi::DenseTensor consisting of all the hypothesis strings.
 And the `batch_size` reference strings are arranged in order in the same way in the
 Input(Refs).
diff --git a/paddle/fluid/operators/enqueue_op.cc b/paddle/fluid/operators/enqueue_op.cc
index e4f2f70c72a4b0ebd82b48099ce5742131452069..c8279719789c4508ff36fd1dbaaa6c6cc3d7c203 100644
--- a/paddle/fluid/operators/enqueue_op.cc
+++ b/paddle/fluid/operators/enqueue_op.cc
@@ -31,7 +31,6 @@ class OpBase;
 }  // namespace imperative
 }  // namespace paddle
 
-using LoDTensor = phi::DenseTensor;
 using LoDTensorBlockingQueueHolder =
     paddle::operators::reader::LoDTensorBlockingQueueHolder;
 
@@ -61,7 +60,7 @@ class EnqueueOp : public framework::OperatorBase {
     PADDLE_ENFORCE_NOT_NULL(in_var,
                             platform::errors::NotFound(
                                 "No variable with name %s found.", var_name));
-    auto* in_tensor = in_var->GetMutable<LoDTensor>();
+    auto* in_tensor = in_var->GetMutable<phi::DenseTensor>();
     auto* queue_holder =
         queue_holder_var->template GetMutable<LoDTensorBlockingQueueHolder>();
diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op_mlu.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op_mlu.cc
index 67812f5bc54b3e9a535d67b7449ec977d045dea1..34b760252bece7ac4f5e595bc4260948927e0eb4 100644
--- a/paddle/fluid/operators/fill_constant_batch_size_like_op_mlu.cc
+++ b/paddle/fluid/operators/fill_constant_batch_size_like_op_mlu.cc
@@ -32,7 +32,7 @@ class FillConstantBatchSizeLikeOpMLUKernel : public framework::OpKernel<T> {
     auto *out = ctx.Output<phi::DenseTensor>("Out");
     auto *in = ctx.Input<phi::DenseTensor>("Input");
     if (in->lod().size() && ctx.Attr<int>("input_dim_idx") == 0) {
-      // set the correct batch size for the LoDTensor.
+      // set the correct batch size for the phi::DenseTensor.
       auto odims = out->dims();
       int output_dim_idx = ctx.Attr<int>("output_dim_idx");
       odims[output_dim_idx] = static_cast<int>(in->lod().back().size()) - 1;
diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op_npu.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op_npu.cc
index 339f36c53f96a0c6f0216afd5dfc603d66e7ddea..22df3e5a9d23a4978e823ea5c95f640cca8c09ce 100644
--- a/paddle/fluid/operators/fill_constant_batch_size_like_op_npu.cc
+++ b/paddle/fluid/operators/fill_constant_batch_size_like_op_npu.cc
@@ -35,7 +35,7 @@ class FillConstantBatchSizeLikeOpNPUKernel : public framework::OpKernel<T> {
     auto *out = ctx.Output<phi::DenseTensor>("Out");
     auto *in = ctx.Input<phi::DenseTensor>("Input");
     if (in->lod().size() && ctx.Attr<int>("input_dim_idx") == 0) {
-      // set the correct batch size for the LoDTensor.
+      // set the correct batch size for the phi::DenseTensor.
       auto odims = out->dims();
       int output_dim_idx = ctx.Attr<int>("output_dim_idx");
       odims[output_dim_idx] = static_cast<int>(in->lod().back().size()) - 1;
diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc
index 8fe7b417e662e6aa9c55e4df5a76434800c1bc04..8937676c344ff8ce3920a4b1834b0efcfa3e1b5a 100644
--- a/paddle/fluid/operators/fill_op.cc
+++ b/paddle/fluid/operators/fill_op.cc
@@ -27,7 +27,7 @@ class FillOpMaker : public framework::OpProtoAndCheckerMaker {
 Fill an tensor with `value` and `shape`. The type of the tensor is specify by `dtype`.
)DOC"); - AddOutput("Out", "(LoDTensor) The output tensor."); + AddOutput("Out", "(phi::DenseTensor) The output tensor."); AddAttr>( "value", "The float values of tensor, which are flatten in row major"); AddAttr>("shape", "The shape of output tensor"); diff --git a/paddle/fluid/operators/filter_by_instag_op.cc b/paddle/fluid/operators/filter_by_instag_op.cc index a0ac46c4a66030cfc6f13946827b293d00ba0230..808792468ff38d892087d9a08762c8a241e413a6 100644 --- a/paddle/fluid/operators/filter_by_instag_op.cc +++ b/paddle/fluid/operators/filter_by_instag_op.cc @@ -69,16 +69,17 @@ class FilterByInstagOp : public framework::OperatorWithKernel { class FilterByInstagOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("Ins", "(LoDTensor) embeded tensor"); - AddInput("Ins_tag", "(LoDTensor) ins tag list"); + AddInput("Ins", "(phi::DenseTensor) embeded tensor"); + AddInput("Ins_tag", "(phi::DenseTensor) ins tag list"); AddInput("Filter_tag", "(1D Tensor) filter tag list"); AddAttr("is_lod", "is Ins with LoD info or not, default True"); AddAttr("out_val_if_empty", "if the output after filter is empty, the output value") .SetDefault(0); - AddOutput("Out", "(LoDTensor) embeded tensor filtered by instag"); + AddOutput("Out", "(phi::DenseTensor) embeded tensor filtered by instag"); AddOutput("LossWeight", "(Tensor) loss weight."); - AddOutput("IndexMap", "(LoDTensor) mapping from Out rows to X1 rows"); + AddOutput("IndexMap", + "(phi::DenseTensor) mapping from Out rows to X1 rows"); AddComment(R"DOC( Filter By Instag Op diff --git a/paddle/fluid/operators/filter_by_instag_op.cu b/paddle/fluid/operators/filter_by_instag_op.cu index 90bc2eda3c92cc70502880f110bc912df5edfa8e..56068684e16ce793e2663d28068291e17716d0ef 100644 --- a/paddle/fluid/operators/filter_by_instag_op.cu +++ b/paddle/fluid/operators/filter_by_instag_op.cu @@ -45,7 +45,6 @@ namespace operators { using Tensor = phi::DenseTensor; using SelectedRows = phi::SelectedRows; -using LoDTensor = phi::DenseTensor; template using Vector = framework::Vector; @@ -341,7 +340,7 @@ class FilterByInstagGPUKernel : public framework::OpKernel { // context.cuda_device_context().GetMaxThreadsPerBlock(); // X1 is global FC output // Dim [batch size, embedding size] - const LoDTensor* x1 = context.Input("Ins"); + const phi::DenseTensor* x1 = context.Input("Ins"); bool is_lod = context.Attr("is_lod"); int is_x1_lod = -1; @@ -354,7 +353,7 @@ class FilterByInstagGPUKernel : public framework::OpKernel { size_t x1_embed_size = x1->dims()[1]; // X2 is ins tag list // LoD [[0, Sum(ins1), Sum(ins1, ins2), ... ]] - const LoDTensor* x2 = context.Input("Ins_tag"); + const phi::DenseTensor* x2 = context.Input("Ins_tag"); // expected auto = const int64_t const int64_t* x2_data = x2->data(); @@ -389,7 +388,7 @@ class FilterByInstagGPUKernel : public framework::OpKernel { x1_lods.push_back(i + 1); } } else { - // x1_lods = context.Input("Ins")->lod()[0]; + // x1_lods = context.Input("Ins")->lod()[0]; // new: lod_level=0 => lod() return {} if (x1->lod().size() != 0) { // lod_level = 1 x1_lods = x1->lod()[0]; @@ -412,9 +411,10 @@ class FilterByInstagGPUKernel : public framework::OpKernel { // for those whose ins been dropout, set 0 for whole lines. 
// otherwise, copy whole line // Dim [local fc count, batch size, embedding size] - LoDTensor* out = context.Output("Out"); - LoDTensor* map = context.Output("IndexMap"); - LoDTensor* loss_weight = context.Output("LossWeight"); + phi::DenseTensor* out = context.Output("Out"); + phi::DenseTensor* map = context.Output("IndexMap"); + phi::DenseTensor* loss_weight = + context.Output("LossWeight"); int out_first = x1_lods.back(); @@ -563,13 +563,15 @@ class FilterByInstagGradGPUKernel : public framework::OpKernel { auto gpu_place = context.GetPlace(); gpuStream_t current_stream = context.cuda_device_context().stream(); auto max_thread_num_per_block = 1024; - auto* output_grad = context.Input(framework::GradVarName("Out")); - auto* x1_grad = context.Output(framework::GradVarName("Ins")); - auto* loss_weight = context.Input("LossWeight"); - auto* mmap = context.Input("IndexMap"); - auto* x1 = context.Input("Ins"); - - x1_grad->set_lod(context.Input("Ins")->lod()); + auto* output_grad = + context.Input(framework::GradVarName("Out")); + auto* x1_grad = + context.Output(framework::GradVarName("Ins")); + auto* loss_weight = context.Input("LossWeight"); + auto* mmap = context.Input("IndexMap"); + auto* x1 = context.Input("Ins"); + + x1_grad->set_lod(context.Input("Ins")->lod()); x1_grad->Resize(x1->dims()); auto* mmap_data = mmap->data(); diff --git a/paddle/fluid/operators/filter_by_instag_op.h b/paddle/fluid/operators/filter_by_instag_op.h index c5d4e35428f43d29046e72d4c6c8f660bb78965a..04f1099168a5ce9e01b8cf7553a6717a594cb64c 100644 --- a/paddle/fluid/operators/filter_by_instag_op.h +++ b/paddle/fluid/operators/filter_by_instag_op.h @@ -31,7 +31,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; using SelectedRows = phi::SelectedRows; -using LoDTensor = phi::DenseTensor; template using Vector = framework::Vector; @@ -42,12 +41,12 @@ class FilterByInstagKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { // X1 is global FC output // Dim [batch size, embedding size] - auto* x1 = context.Input("Ins"); + auto* x1 = context.Input("Ins"); bool is_x1_lod = context.Attr("is_lod"); int64_t out_val_if_empty = context.Attr("out_val_if_empty"); // X2 is ins tag list // LoD [[0, Sum(ins1), Sum(ins1, ins2), ... ]] - auto* x2 = context.Input("Ins_tag"); + auto* x2 = context.Input("Ins_tag"); // X3 is local fc tag list // LoD [[0, Sum(fc1), Sum(fc1, fc2) ...]] auto* x3 = context.Input("Filter_tag"); @@ -107,9 +106,10 @@ class FilterByInstagKernel : public framework::OpKernel { // for those whose ins been dropout, set 0 for whole lines. 
// otherwise, copy whole line // Dim [local fc count, batch size, embedding size] - LoDTensor* out = context.Output("Out"); - LoDTensor* map = context.Output("IndexMap"); - LoDTensor* loss_weight = context.Output("LossWeight"); + phi::DenseTensor* out = context.Output("Out"); + phi::DenseTensor* map = context.Output("IndexMap"); + phi::DenseTensor* loss_weight = + context.Output("LossWeight"); // expected auto = const T auto* x1_data = x1->data(); // expected auto = T @@ -196,12 +196,14 @@ template class FilterByInstagGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* output_grad = context.Input(framework::GradVarName("Out")); - auto* x1_grad = context.Output(framework::GradVarName("Ins")); - auto* loss_weight = context.Input("LossWeight"); - auto* mmap = context.Input("IndexMap"); - auto* x1 = context.Input("Ins"); - x1_grad->set_lod(context.Input("Ins")->lod()); + auto* output_grad = + context.Input(framework::GradVarName("Out")); + auto* x1_grad = + context.Output(framework::GradVarName("Ins")); + auto* loss_weight = context.Input("LossWeight"); + auto* mmap = context.Input("IndexMap"); + auto* x1 = context.Input("Ins"); + x1_grad->set_lod(context.Input("Ins")->lod()); x1_grad->Resize(x1->dims()); auto mmap_data = mmap->data(); // expected auto = T diff --git a/paddle/fluid/operators/get_tensor_from_selected_rows_op.cc b/paddle/fluid/operators/get_tensor_from_selected_rows_op.cc index c96bc1a90255149f6dba99319f32f482649c15da..658352d844d9a21267a7da946a50aaace0591e89 100644 --- a/paddle/fluid/operators/get_tensor_from_selected_rows_op.cc +++ b/paddle/fluid/operators/get_tensor_from_selected_rows_op.cc @@ -35,13 +35,14 @@ class GetTensorFromSelectedRowsOp : public framework::OperatorWithKernel { "but the received is %s", ctx->Inputs("X").front(), ctx->GetInputsVarType("X").front())); - PADDLE_ENFORCE_EQ(ctx->GetOutputsVarType("Out").front(), - framework::proto::VarType::LOD_TENSOR, - platform::errors::InvalidArgument( - "The output Out(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx->Outputs("Out").front(), - ctx->GetOutputsVarType("Out").front())); + PADDLE_ENFORCE_EQ( + ctx->GetOutputsVarType("Out").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The output Out(%s)'s type should be phi::DenseTensor, " + "but the received is %s", + ctx->Outputs("Out").front(), + ctx->GetOutputsVarType("Out").front())); ctx->SetOutputDim("Out", ctx->GetInputDim("X")); } @@ -72,7 +73,7 @@ class GetTensorFromSelectedRowsOpProtoMaker public: void Make() override { AddInput("X", "The input type is SelectedRows."); - AddOutput("Out", "The output type is LoDTensor."); + AddOutput("Out", "The output type is phi::DenseTensor."); AddComment( R"DOC( GetTensorFromSelectedRows Operator diff --git a/paddle/fluid/operators/group_norm_op.cc b/paddle/fluid/operators/group_norm_op.cc index f6f8cb5aff6d888253bd2d82d34d44fa38f3d159..3d6566d62b2a749aad388bc863e11cb21c76ab47 100644 --- a/paddle/fluid/operators/group_norm_op.cc +++ b/paddle/fluid/operators/group_norm_op.cc @@ -29,7 +29,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using DataLayout = phi::DataLayout; class GroupNormOp : public framework::OperatorWithKernel { @@ -127,8 +126,8 @@ class GroupNormGradOp : public framework::OperatorWithKernel { const Tensor *t = nullptr; if (var->IsType()) { t = &var->Get(); - } else if (var->IsType()) { - t = &var->Get(); 
+ } else if (var->IsType()) { + t = &var->Get(); } PADDLE_ENFORCE_NOT_NULL( t, diff --git a/paddle/fluid/operators/group_norm_op.h b/paddle/fluid/operators/group_norm_op.h index 657892877fe8186833d55bca94bb1578621e90f8..0ce89b4625a131475f934572db0b2e30cd9124a6 100644 --- a/paddle/fluid/operators/group_norm_op.h +++ b/paddle/fluid/operators/group_norm_op.h @@ -29,7 +29,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using DataLayout = phi::DataLayout; template diff --git a/paddle/fluid/operators/gru_op.cc b/paddle/fluid/operators/gru_op.cc index fc78f514a45077ae028c1714a9fe252f49e36249..cceecdcad5fd2be1bcd6ae635a3102df9c4b4aad 100644 --- a/paddle/fluid/operators/gru_op.cc +++ b/paddle/fluid/operators/gru_op.cc @@ -115,11 +115,12 @@ class GRUOp : public framework::OperatorWithKernel { class GRUOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("Input", - "(LoDTensor) The first input is a LodTensor, which supports " - "variable-time length input sequence. The underlying tensor in " - "this LoDTenosr is a matrix with shape (T X 3D), where, T is the " - "total time steps in this mini-batch, D is the hidden size."); + AddInput( + "Input", + "(phi::DenseTensor) The first input is a LodTensor, which supports " + "variable-time length input sequence. The underlying tensor in " + "this phi::DenseTensor is a matrix with shape (T X 3D), where, T is " + "the total time steps in this mini-batch, D is the hidden size."); AddInput("H0", "(Tensor, optional) The initial hidden state is an optional " "input. This is a tensor with shape (N x D), where N is the " @@ -136,35 +137,38 @@ class GRUOpMaker : public framework::OpProtoAndCheckerMaker { "(Tensor, optional) Bias vector with shape (1 x 3D) concating " "bias of the update gate, reset gate and output candidate.") .AsDispensable(); - AddOutput("BatchGate", - "(LoDTensor) To compute with batches, sequence data will be " - "reorganized into several successive batches each containing " - "data from the same time step. The LoDTensor BatchGate contains " - "the update gate, reset gate and output candidate values " - "organized in batches. The LoD size is 2. The first LoD contains " - "the batch offsets and the second LoD contains the indexes in " - "the raw sequence data.") + AddOutput( + "BatchGate", + "(phi::DenseTensor) To compute with batches, sequence data will be " + "reorganized into several successive batches each containing " + "data from the same time step. The phi::DenseTensor BatchGate contains " + "the update gate, reset gate and output candidate values " + "organized in batches. The LoD size is 2. The first LoD contains " + "the batch offsets and the second LoD contains the indexes in " + "the raw sequence data.") .AsIntermediate() .AsExtra(); - AddOutput( - "BatchResetHiddenPrev", - "(LoDTensor) The reset hidden state LoDTensor organized in batches. " - "This LoDTensor is a matrix with shape (T X D) and has the same LoD " - "with `BatchGate`.") + AddOutput("BatchResetHiddenPrev", + "(phi::DenseTensor) The reset hidden state phi::DenseTensor " + "organized in batches. " + "This phi::DenseTensor is a matrix with shape (T X D) and has " + "the same LoD " + "with `BatchGate`.") .AsIntermediate() .AsExtra(); - AddOutput( - "BatchHidden", - "(LoDTensor) The hidden state LoDTensor organized in batches. 
" - "This LoDTensor is a matrix with shape (T X D) and has the same LoD " - "with `BatchGate`.") + AddOutput("BatchHidden", + "(phi::DenseTensor) The hidden state phi::DenseTensor organized " + "in batches. " + "This phi::DenseTensor is a matrix with shape (T X D) and has " + "the same LoD " + "with `BatchGate`.") .AsIntermediate() .AsExtra(); - AddOutput( - "Hidden", - "(LoDTensor) the hidden state LoDTensor organized in sequences. " - "This LoDTensor is a matrix with shape (T X D) and has the same LoD " - "with `BatchGate`."); + AddOutput("Hidden", + "(phi::DenseTensor) the hidden state phi::DenseTensor organized " + "in sequences. " + "This phi::DenseTensor is a matrix with shape (T X D) and has " + "the same LoD with `BatchGate`."); AddAttr("activation", "(string, default tanh) " "The activation type used for output candidate {h}_t.") @@ -314,23 +318,24 @@ class GRUCPUKernel : public framework::OpKernel { public: void BatchCompute(const framework::ExecutionContext& context) const { using DeviceContext = phi::CPUContext; - using LodTensorPtr = LoDTensor*; + using LodTensorPtr = phi::DenseTensor*; bool is_test = context.Attr("is_test"); bool origin_mode = context.Attr("origin_mode"); - auto* input = context.Input("Input"); + auto* input = context.Input("Input"); auto* h0 = context.Input("H0"); auto* weight = context.Input("Weight"); const T* weight_data = weight->data(); auto* bias = context.Input("Bias"); - auto* hidden = context.Output("Hidden"); + auto* hidden = context.Output("Hidden"); hidden->mutable_data(context.GetPlace()); auto input_dims = input->dims(); auto hidden_dims = hidden->dims(); LodTensorPtr batch_gate, batch_reset_hidden_prev, batch_hidden; - LoDTensor batch_gate_tmp, batch_reset_hidden_prev_tmp, batch_hidden_tmp; + phi::DenseTensor batch_gate_tmp, batch_reset_hidden_prev_tmp, + batch_hidden_tmp; if (is_test) { batch_gate = &batch_gate_tmp; batch_gate->Resize(input_dims); @@ -341,10 +346,10 @@ class GRUCPUKernel : public framework::OpKernel { batch_hidden = &batch_hidden_tmp; batch_hidden->Resize(hidden_dims); } else { - batch_gate = context.Output("BatchGate"); - batch_hidden = context.Output("BatchHidden"); + batch_gate = context.Output("BatchGate"); + batch_hidden = context.Output("BatchHidden"); batch_reset_hidden_prev = - context.Output("BatchResetHiddenPrev"); + context.Output("BatchResetHiddenPrev"); } batch_gate->mutable_data(context.GetPlace()); batch_reset_hidden_prev->mutable_data(context.GetPlace()); diff --git a/paddle/fluid/operators/gru_op.cu.cc b/paddle/fluid/operators/gru_op.cu.cc index 2d63eb4d3a698dc8394b03184cc6efd1cf09709a..a6b57bd88f77d29fc741f1397d13eb125f5609cb 100644 --- a/paddle/fluid/operators/gru_op.cu.cc +++ b/paddle/fluid/operators/gru_op.cu.cc @@ -21,23 +21,24 @@ template class GRUKernel : public framework::OpKernel { public: void BatchCompute(const framework::ExecutionContext& context) const { - using LodTensorPtr = LoDTensor*; + using LodTensorPtr = phi::DenseTensor*; bool is_test = context.Attr("is_test"); bool origin_mode = context.Attr("origin_mode"); - auto* input = context.Input("Input"); + auto* input = context.Input("Input"); auto* h0 = context.Input("H0"); auto* weight = context.Input("Weight"); const T* weight_data = weight->data(); auto* bias = context.Input("Bias"); - auto* hidden = context.Output("Hidden"); + auto* hidden = context.Output("Hidden"); hidden->mutable_data(context.GetPlace()); auto input_dims = input->dims(); auto hidden_dims = hidden->dims(); LodTensorPtr batch_gate, batch_reset_hidden_prev, batch_hidden; - 
LoDTensor batch_gate_tmp, batch_reset_hidden_prev_tmp, batch_hidden_tmp; + phi::DenseTensor batch_gate_tmp, batch_reset_hidden_prev_tmp, + batch_hidden_tmp; if (is_test) { batch_gate = &batch_gate_tmp; batch_gate->Resize(input_dims); @@ -48,10 +49,10 @@ class GRUKernel : public framework::OpKernel { batch_hidden = &batch_hidden_tmp; batch_hidden->Resize(hidden_dims); } else { - batch_gate = context.Output("BatchGate"); - batch_hidden = context.Output("BatchHidden"); + batch_gate = context.Output("BatchGate"); + batch_hidden = context.Output("BatchHidden"); batch_reset_hidden_prev = - context.Output("BatchResetHiddenPrev"); + context.Output("BatchResetHiddenPrev"); } batch_gate->mutable_data(context.GetPlace()); batch_reset_hidden_prev->mutable_data(context.GetPlace()); diff --git a/paddle/fluid/operators/gru_op.h b/paddle/fluid/operators/gru_op.h index e050c42a0ec545955dc17ced2627b3f00784b298..89731e2efa0228e79e1620656085835307d7e490 100644 --- a/paddle/fluid/operators/gru_op.h +++ b/paddle/fluid/operators/gru_op.h @@ -25,7 +25,6 @@ limitations under the License. */ namespace paddle { namespace operators { -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template @@ -47,15 +46,15 @@ class GRUGradKernel : public framework::OpKernel { auto* h0 = context.Input("H0"); auto* weight = context.Input("Weight"); const T* weight_data = weight->data(); - auto* batch_gate = context.Input("BatchGate"); + auto* batch_gate = context.Input("BatchGate"); auto* batch_reset_hidden_prev = - context.Input("BatchResetHiddenPrev"); - auto* batch_hidden = context.Input("BatchHidden"); - auto* hidden = context.Input("Hidden"); + context.Input("BatchResetHiddenPrev"); + auto* batch_hidden = context.Input("BatchHidden"); + auto* hidden = context.Input("Hidden"); auto* hidden_grad = - context.Input(framework::GradVarName("Hidden")); + context.Input(framework::GradVarName("Hidden")); auto* input_grad = - context.Output(framework::GradVarName("Input")); + context.Output(framework::GradVarName("Input")); auto* h0_grad = context.Output(framework::GradVarName("H0")); auto* weight_grad = @@ -68,7 +67,8 @@ class GRUGradKernel : public framework::OpKernel { int frame_size = hidden_dims[1]; phi::funcs::LoDTensor2BatchFunctor to_batch; - LoDTensor batch_hidden_grad, batch_gate_grad, batch_reset_hidden_prev_grad; + phi::DenseTensor batch_hidden_grad, batch_gate_grad, + batch_reset_hidden_prev_grad; batch_hidden_grad.mutable_data(hidden_dims, context.GetPlace()); batch_gate_grad.mutable_data(gate_dims, context.GetPlace()); batch_reset_hidden_prev_grad.mutable_data(hidden_dims, diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.cc b/paddle/fluid/operators/hierarchical_sigmoid_op.cc index 8193be6b6b8e1971ef94021dd9f49f0bbb3c900a..7255abcb7b4b63f96008d182f597c30e1724f55c 100644 --- a/paddle/fluid/operators/hierarchical_sigmoid_op.cc +++ b/paddle/fluid/operators/hierarchical_sigmoid_op.cc @@ -82,43 +82,44 @@ class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("X", - "(LoDTensor, required) The input tensor with shape [N, D], " + "(phi::DenseTensor, required) The input tensor with shape [N, D], " "where N is the size of mini-batch, and D is the feature size."); AddInput("W", - "(LoDTensor, required), The parameters of hierarchical " + "(phi::DenseTensor, required), The parameters of hierarchical " "sigmoid operator, each of them is a 2-D tensor, the shape is" "[K, D]. 
where K is the number of non-leaf nodes in the Path Tree"); AddInput("Label", - "(LoDTensor, required), The labels of training data. It's a" + "(phi::DenseTensor, required), The labels of training data. It's a" "tensor with shape [N, 1]."); - AddInput("PathTable", - "(LoDTensor, optional), The Path Table from root to current word" - "it should have shape like [N, L], L is the length of the Path") - .AsDispensable(); AddInput( - "PathCode", - "(LoDTensor, optional), The Code on each Node of the Path from root " - "to current word" + "PathTable", + "(phi::DenseTensor, optional), The Path Table from root to current word" "it should have shape like [N, L], L is the length of the Path") .AsDispensable(); + AddInput("PathCode", + "(phi::DenseTensor, optional), The Code on each Node of the Path " + "from root " + "to current word" + "it should have shape like [N, L], L is the length of the Path") + .AsDispensable(); AddInput("Bias", - "(LoDTensor, optional), The bias is a tensor with shape or " + "(phi::DenseTensor, optional), The bias is a tensor with shape or " "[num_classes, 1]" "[num_classes - 1, 1].") .AsDispensable(); - AddOutput( - "Out", - "(LoDTensor, required) The output of hierarchical sigmoid operator." - "The shape is [N, 1]."); + AddOutput("Out", + "(phi::DenseTensor, required) The output of hierarchical sigmoid " + "operator." + "The shape is [N, 1]."); AddOutput("PreOut", - "(LoDTensor, required) A intermedia 2-D tensor with shape " + "(phi::DenseTensor, required) An intermediate 2-D tensor with shape " "[batch_size, code_length], where code_length represents the " "maximum path length from root to leaf nodes.") .AsIntermediate(); - AddOutput( - "W_Out", - "(LoDTensor, optional) using input 'W' as Output to make it mutable" - "When we are using prefetch") + AddOutput("W_Out", + "(phi::DenseTensor, optional) using input 'W' as Output to make " + "it mutable" + "When we are using prefetch") .AsIntermediate(); AddAttr("num_classes", "(int, optional), The number of classes") .SetDefault(2); @@ -227,7 +228,8 @@ class HierarchicalSigmoidGradOpGradVarTypeInference auto bias_grad_var_name = framework::GradVarName("Bias"); if (ctx->HasOutput(bias_grad_var_name)) { VLOG(3) << "hierarchical_sigmoid_grad op " - << framework::GradVarName("Bias") << " is set to LoDTensor"; + << framework::GradVarName("Bias") + << " is set to phi::DenseTensor"; ctx->SetOutputType(bias_grad_var_name, framework::proto::VarType::LOD_TENSOR); } @@ -241,7 +243,7 @@ class HierarchicalSigmoidGradOpGradVarTypeInference framework::proto::VarType::SELECTED_ROWS); } else { VLOG(3) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W") - << " is set to LoDTensor"; + << " is set to phi::DenseTensor"; ctx->SetOutputType(w_grad_var_name, framework::proto::VarType::LOD_TENSOR); } diff --git a/paddle/fluid/operators/im2sequence_op.h b/paddle/fluid/operators/im2sequence_op.h index afb4db0f3c633b033c0b66cea8f07a3308ecfe50..a9da8f8f4dbbc79e6efad7d59aeb04fd38a1f17c 100644 --- a/paddle/fluid/operators/im2sequence_op.h +++ b/paddle/fluid/operators/im2sequence_op.h @@ -27,7 +27,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; inline int Im2SeqOutputSize( int input_size, int filter_size, int padding_0, int padding_1, int stride) { @@ -41,7 +40,7 @@ class Im2SequenceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { const phi::DenseTensor* in = ctx.Input("X"); - LoDTensor* out = ctx.Output("Out"); +
phi::DenseTensor* out = ctx.Output("Out"); auto in_dim = in->dims(); int batch_size = in_dim[0]; int img_channels = in_dim[1]; diff --git a/paddle/fluid/operators/index_select_op.h b/paddle/fluid/operators/index_select_op.h index 71dd10d0aa42bd22d2d7e292e77d5a32d78ae96a..a705a95156608cbb8020201b67eaf8209a66a5b5 100644 --- a/paddle/fluid/operators/index_select_op.h +++ b/paddle/fluid/operators/index_select_op.h @@ -23,21 +23,20 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using DDim = framework::DDim; template void IndexSelectInner(const framework::ExecutionContext& context, - LoDTensor* input, - const LoDTensor& index, - LoDTensor* output, + phi::DenseTensor* input, + const phi::DenseTensor& index, + phi::DenseTensor* output, int dim) { auto input_dim = input->dims(); auto input_dim_size = input_dim.size(); auto output_dim = output->dims(); auto index_size = index.dims()[0]; - LoDTensor index_cpu_copy; + phi::DenseTensor index_cpu_copy; if (!platform::is_cpu_place(index.place())) { framework::TensorCopySync(index, platform::CPUPlace(), &index_cpu_copy); } @@ -127,9 +126,9 @@ struct IndexSelectAdd< template void IndexSelectGradInner(const framework::ExecutionContext& context, - const LoDTensor& out_grad, - const LoDTensor& index, - LoDTensor* x_grad, + const phi::DenseTensor& out_grad, + const phi::DenseTensor& index, + phi::DenseTensor* x_grad, int dim) { const T* input_data = out_grad.data(); const IndexT* index_data = index.data(); diff --git a/paddle/fluid/operators/inplace_abn_op.cc b/paddle/fluid/operators/inplace_abn_op.cc index 61379a3d893ea7deb58b9803974d6e3af97f0eb5..53453c6cad184a88d9f2dd9c3ce7f0fb71f5ebc1 100644 --- a/paddle/fluid/operators/inplace_abn_op.cc +++ b/paddle/fluid/operators/inplace_abn_op.cc @@ -147,8 +147,8 @@ class InplaceABNGradOp : public paddle::operators::BatchNormGradOp { const phi::DenseTensor* t = nullptr; if (var->IsType()) { t = &var->Get(); - } else if (var->IsType()) { - t = &var->Get(); + } else if (var->IsType()) { + t = &var->Get(); } if (t == nullptr) { PADDLE_THROW( diff --git a/paddle/fluid/operators/instance_norm_op.cc b/paddle/fluid/operators/instance_norm_op.cc index ae4da5c51a08890737bdea202968e1c59f88a0a6..ed474193461c395b25a5eb9019bda3cb1faceb16 100644 --- a/paddle/fluid/operators/instance_norm_op.cc +++ b/paddle/fluid/operators/instance_norm_op.cc @@ -108,8 +108,8 @@ framework::OpKernelType InstanceNormGradOp::GetExpectedKernelType( const Tensor *t = nullptr; if (var->IsType()) { t = &var->Get(); - } else if (var->IsType()) { - t = &var->Get(); + } else if (var->IsType()) { + t = &var->Get(); } if (t == nullptr) { PADDLE_THROW( @@ -129,8 +129,8 @@ framework::OpKernelType InstanceNormDoubleGradOp::GetExpectedKernelType( const Tensor *t = nullptr; if (var->IsType()) { t = &var->Get(); - } else if (var->IsType()) { - t = &var->Get(); + } else if (var->IsType()) { + t = &var->Get(); } if (t == nullptr) { PADDLE_THROW( diff --git a/paddle/fluid/operators/instance_norm_op.h b/paddle/fluid/operators/instance_norm_op.h index da6bb74ac56bc23c792e17517bea7c17ca7c47f2..2101f6a12bb53c5072547206302bac462d48db35 100644 --- a/paddle/fluid/operators/instance_norm_op.h +++ b/paddle/fluid/operators/instance_norm_op.h @@ -23,7 +23,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using DataLayout = phi::DataLayout; class InstanceNormOp : public framework::OperatorWithKernel { diff --git 
a/paddle/fluid/operators/label_smooth_op_mlu.cc b/paddle/fluid/operators/label_smooth_op_mlu.cc index 34293fd8fc6edfde5b14d25b94256f99e70e8c68..211ffc7fb2cd6069a1644f317dcee6885183ff7f 100644 --- a/paddle/fluid/operators/label_smooth_op_mlu.cc +++ b/paddle/fluid/operators/label_smooth_op_mlu.cc @@ -19,15 +19,14 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; template class LabelSmoothMLUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* in_t = ctx.Input("X"); + auto* in_t = ctx.Input("X"); auto* dist_t = ctx.Input("PriorDist"); - auto* out_t = ctx.Output("Out"); + auto* out_t = ctx.Output("Out"); auto epsilon = ctx.Attr("epsilon"); auto epsilon_gt = 1.0f - epsilon; diff --git a/paddle/fluid/operators/label_smooth_op_npu.cc b/paddle/fluid/operators/label_smooth_op_npu.cc index f6c96357e8ec2bb0d5aa5a2fe0959e0b34f0f571..529e8564cb19bf92d4cc877038eb7d8d72b3be74 100644 --- a/paddle/fluid/operators/label_smooth_op_npu.cc +++ b/paddle/fluid/operators/label_smooth_op_npu.cc @@ -19,7 +19,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; template void LabelSmoothMuls(const platform::Place& place, @@ -58,8 +57,8 @@ template class LabelSmoothNPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* out_t = ctx.Output("Out"); - auto* in_t = ctx.Input("X"); + auto* out_t = ctx.Output("Out"); + auto* in_t = ctx.Input("X"); auto* dist_t = ctx.Input("PriorDist"); auto epsilon = ctx.Attr("epsilon"); diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc index 30ddc3bbe224949a876b957a173700a053a36867..1081df4166aacb9c5251d0e2c0580e2ebc8c923b 100644 --- a/paddle/fluid/operators/layer_norm_op.cc +++ b/paddle/fluid/operators/layer_norm_op.cc @@ -21,7 +21,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using DataLayout = phi::DataLayout; class LayerNormOp : public framework::OperatorWithKernel { @@ -214,8 +213,8 @@ class LayerNormGradOp : public framework::OperatorWithKernel { const Tensor *t = nullptr; if (var->IsType()) { t = &var->Get(); - } else if (var->IsType()) { - t = &var->Get(); + } else if (var->IsType()) { + t = &var->Get(); } PADDLE_ENFORCE_NOT_NULL( t, platform::errors::NotFound("Y@GRAD of LayerNorm Op is not found.")); diff --git a/paddle/fluid/operators/limit_by_capacity_op.cu b/paddle/fluid/operators/limit_by_capacity_op.cu index f6e0bffa1d1ce9e0386129aadf5be148c97177ee..28ae524e0a4f9f0f3f5da9040aec28b7c88370bb 100644 --- a/paddle/fluid/operators/limit_by_capacity_op.cu +++ b/paddle/fluid/operators/limit_by_capacity_op.cu @@ -28,7 +28,6 @@ namespace paddle { namespace operators { -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc index 99c10e868a396e8f9d5787ec915b3e48484f3611..64fe6562a6c7d9c0d17a194fc9f9142af4b563e9 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cc +++ b/paddle/fluid/operators/linear_chain_crf_op.cc @@ -23,23 +23,24 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("Emission", - "(LoDTensor/Tensor). When a LoDTensor input,A 2-D LoDTensor" + "(phi::DenseTensor). 
When a phi::DenseTensor " + "input,A 2-D phi::DenseTensor" " with shape [N x D], where N is the size of the " "mini-batch and D is the total tag number. The unscaled emission " "weight matrix for the linear chain CRF. When a Tensor input," "A Tensor with shape [N x S x D], where N is batch number," "S is max length of sequences, D is the total tag number." - "A LoDTensor or Tensor with type float32, float64."); + "A phi::DenseTensor with type float32, float64."); AddInput("Transition", "(Tensor, default Tensor) A 2-D Tensor with shape " "[(D + 2) x D]. The learnable parameter for the linear_chain_crf " "operator. See more details in the operator's comments."); AddInput("Label", - "(LoDTensor/Tensor), when a LoDTensor input, " + "(phi::DenseTensor), when a phi::DenseTensor input, " "[N x 1], where N is the total element number in a mini-batch. " "when a Tensor input, [N x S], where N is batch number. " "S is max length of sequences. The ground truth." - "A LoDTensor or Tensor with int64."); + "A phi::DenseTensor with int64."); AddInput("Length", "(Tensor, default Tensor) A Tensor with shape " "[M x 1], where M is the sequence number in a mini-batch." @@ -63,7 +64,7 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { "The exponentials of Input(Emission). This is an intermediate " "computational result in forward computation, and will be reused in " "backward computation." - "A LoDTensor or Tensor with type float32, float64.") + "A phi::DenseTensor with type float32, float64.") .AsIntermediate(); AddOutput( "TransitionExps", @@ -71,7 +72,7 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { "[(D + 2) x D]. The exponentials of Input(Transition). This is an " "intermediate computational result in forward computation, and " "will be reused in backward computation." 
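These intermediate exponentials feed the forward (alpha) recursion that produces the LogLikelihood output. As a plain-math illustration of that recursion (not the Paddle kernel, and omitting the two extra start/end rows that make the real Transition [(D + 2) x D]), the log-partition function of one sequence can be computed like this:

```cpp
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

// Illustrative forward recursion of a linear-chain CRF (plain math, not
// the Paddle kernel): alpha[t][j] accumulates, in log space, the scores
// of all tag paths ending in tag j at step t; the log-sum-exp of the
// last row is the partition function used in the log-likelihood.
double LogPartition(const std::vector<std::vector<double>>& emission,  // T x D
                    const std::vector<std::vector<double>>& trans) {   // D x D
  size_t T = emission.size(), D = emission[0].size();
  std::vector<std::vector<double>> alpha(T, std::vector<double>(D));
  alpha[0] = emission[0];
  for (size_t t = 1; t < T; ++t)
    for (size_t j = 0; j < D; ++j) {
      double m = alpha[t - 1][0] + trans[0][j];
      for (size_t i = 1; i < D; ++i)
        m = std::max(m, alpha[t - 1][i] + trans[i][j]);
      double s = 0;  // log-sum-exp, shifted by the max for stability
      for (size_t i = 0; i < D; ++i)
        s += std::exp(alpha[t - 1][i] + trans[i][j] - m);
      alpha[t][j] = m + std::log(s) + emission[t][j];
    }
  double m = alpha[T - 1][0], s = 0;
  for (double a : alpha[T - 1]) m = std::max(m, a);
  for (double a : alpha[T - 1]) s += std::exp(a - m);
  return m + std::log(s);
}

int main() {
  std::cout << LogPartition({{0.5, 1.0}, {1.0, 0.2}},
                            {{0.1, 0.3}, {0.2, 0.0}})
            << '\n';
}
```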
- "A LoDTensor or Tensor with type float32, float64.") + "A phi::DenseTensor with type float32, float64.") .AsIntermediate(); AddOutput( "LogLikelihood", diff --git a/paddle/fluid/operators/linear_chain_crf_op.h b/paddle/fluid/operators/linear_chain_crf_op.h index bda310c31fff06d64dcc067e088284e0e57151fe..bf68c7298e72a72010dca7d5e1ee3ecd12b99170 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.h +++ b/paddle/fluid/operators/linear_chain_crf_op.h @@ -47,7 +47,6 @@ struct ScalarMul { }; using framework::LoD; -using LoDTensor = phi::DenseTensor; template class LinearChainCRFOpKernel : public framework::OpKernel { @@ -114,7 +113,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel { phi::funcs::set_constant(ctx.device_context(), emission_exps, 0.0); phi::funcs::set_constant(ctx.device_context(), alpha, 0.0); } else { - in_lod = ctx.Input("Label")->lod(); + in_lod = ctx.Input("Label")->lod(); PADDLE_ENFORCE_NE(in_lod.size(), 0, platform::errors::InvalidArgument( @@ -286,7 +285,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { emission_exps_tmp.Resize( {emission_dims[0] * emission_dims[1], emission_dims[2]}); } else { - in_lod = ctx.Input("Label")->lod(); + in_lod = ctx.Input("Label")->lod(); PADDLE_ENFORCE_NE(in_lod.size(), 0, platform::errors::InvalidArgument( diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index 94bfc44977fb34e72515abc2acfbd92a48687222..78c06e8c24a000933bcb6b39dc0fe4eb5d2760ec 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -62,7 +62,7 @@ class LoadCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( LoadCombine Operator. -LoadCombine operator loads LoDTensor variables from a file, which could be +LoadCombine operator loads phi::DenseTensor variables from a file, which could be loaded in memory already. The file should contain one or more LoDTensors serialized using the SaveCombine operator. 
The LoadCombine operator applies a deserialization strategy to appropriately load diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index d39beb9266a7e84d9a8f0139b101a94e612b6942..0c66dbd36568f7f844880305b91a52d67e5fbb34 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -37,7 +37,7 @@ class LoadOp : public framework::OperatorWithKernel { class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddOutput("Out", "The LoDTensor / SelectedRows need to be loaded"); + AddOutput("Out", "The phi::DenseTensor / SelectedRows needs to be loaded"); AddAttr( "load_as_fp16", "If true, the tensor will be first loaded and then " @@ -54,7 +54,8 @@ class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker { "(vector) The shape of the output") .SetDefault({}); AddComment( - "Load operator will load a LoDTensor / SelectedRows variable from " + "Load operator will load a phi::DenseTensor / SelectedRows variable " + "from " "disk " "file."); } diff --git a/paddle/fluid/operators/load_op_npu.cc b/paddle/fluid/operators/load_op_npu.cc index 4efe67d36c5ca621cfaf874cdea9de0072f82657..8c00f0868300a0c0b0d5c04011b174f3de13c75e 100644 --- a/paddle/fluid/operators/load_op_npu.cc +++ b/paddle/fluid/operators/load_op_npu.cc @@ -54,7 +54,8 @@ class LoadOpKernel : public framework::OpKernel { LoadSelectedRows(fin, place, out_var); } else { PADDLE_THROW(platform::errors::InvalidArgument( - "Load operator only supports loading LoDTensor and SelectedRows " + "Load operator only supports loading phi::DenseTensor and " + "SelectedRows " "variable, %s has wrong type", out_var_name)); } diff --git a/paddle/fluid/operators/lod_rank_table_op.cc b/paddle/fluid/operators/lod_rank_table_op.cc index cffb76010761cc02347212f918d4be006c077300..a399ad4527ff84e526ac295dc475540eb89650bc 100644 --- a/paddle/fluid/operators/lod_rank_table_op.cc +++ b/paddle/fluid/operators/lod_rank_table_op.cc @@ -52,13 +52,14 @@ class LoDRankTableOp : public framework::OperatorBase { class LoDRankTableOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", - "(LoDTensor) input lod tensor, must contain lod information."); + AddInput( + "X", + "(phi::DenseTensor) input lod tensor, must contain lod information."); AddOutput("Out", "(LoDRankTable) The rank table of specific level."); AddAttr("level", "(int) the specific lod level to rank.") .SetDefault(0) .EqualGreaterThan(0); - AddComment(R"DOC(Create LoDRanTable by LoDTensor + AddComment(R"DOC(Create LoDRankTable by phi::DenseTensor LoD Rank Table stores the `level` of `lod` which is ordered by sequence length in descending order.
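The ordering just described is easy to state in isolation; here is a minimal sketch (illustrative types, not framework::LoDRankTable) of building such a table from one LoD level:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// Sketch of what a rank table holds (illustrative): pair each sequence
// index with its length, then order by length descending. A stable sort
// keeps the original order among equal lengths.
std::vector<std::pair<size_t, size_t>> BuildRankTable(
    const std::vector<size_t>& lod_level) {  // offsets [0, e0, e1, ...]
  std::vector<std::pair<size_t, size_t>> items;  // {index, length}
  for (size_t i = 0; i + 1 < lod_level.size(); ++i)
    items.emplace_back(i, lod_level[i + 1] - lod_level[i]);
  std::stable_sort(items.begin(), items.end(),
                   [](const auto& a, const auto& b) {
                     return a.second > b.second;
                   });
  return items;
}

int main() {
  for (auto [idx, len] : BuildRankTable({0, 2, 5, 6}))
    std::cout << idx << ':' << len << ' ';  // prints: 1:3 0:2 2:1
}
```

Keeping the original index next to each length is what makes the table reusable for restoring sequence order later.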
It is useful when implement dynamic RNN and is diff --git a/paddle/fluid/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc index 1e03bb806f1925c79cdf8f11192d278dbe3a904f..374bb8920fbbd5e5304d7a04c6d9738084b30c7a 100644 --- a/paddle/fluid/operators/lod_reset_op.cc +++ b/paddle/fluid/operators/lod_reset_op.cc @@ -105,18 +105,20 @@ class LoDResetOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("X", - "(Tensor, LoDTensor) Input variable of LoDResetOp which " - "could be a Tensor or LoDTensor, where the data of output " + "(Tensor, phi::DenseTensor) Input variable of LoDResetOp which " + "could be a Tensor or phi::DenseTensor, where the data of output " "variable inherits from."); AddInput("Y", - "(Tensor, LoDTensor, optional) If provided and Y is LoDTensor, " + "(phi::DenseTensor, optional) If provided and Y is " + "phi::DenseTensor, " "lod of Input(Y) would be considered as the target lod first, " "otherwise data of Input(Y) would be considered as the " "target lod.") .AsDispensable(); - AddOutput("Out", - "(LoDTensor) Output variable of LoDResetOp which should be a " - "LoDTensor."); + AddOutput( + "Out", + "(phi::DenseTensor) Output variable of LoDResetOp which should be a " + "phi::DenseTensor."); AddAttr>("target_lod", "The target level 0 LoD from Attr().") .SetDefault(std::vector{}); @@ -124,7 +126,7 @@ class LoDResetOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC(LoDReset operator Set LoD of `X` to a new one specified by `Y` or attribute `target_lod`. When `Y` -provided and `Y` is a LoDTensor, `Y.lod` would be considered as target LoD +provided and `Y` is a phi::DenseTensor, `Y.lod` would be considered as target LoD first, otherwise `Y.data` would be considered as target LoD. If `Y` is not provided, target LoD should be specified by attribute `target_lod`. If target LoD is specified by `Y.data` or `target_lod`, only one level LoD @@ -132,7 +134,7 @@ is supported. 
Example 1: -Given a 1-level LoDTensor input(X): +Given a 1-level phi::DenseTensor input(X): X.lod = [[ 0, 2, 5, 6 ]] X.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] X.dims = [6, 1] @@ -146,7 +148,7 @@ then we get a 1-level LoDTensor: Example 2: -Given a 1-level LoDTensor input(X): +Given a 1-level phi::DenseTensor input(X): X.lod = [[ 0, 2, 5, 6 ]] X.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] X.dims = [6, 1] @@ -162,7 +164,7 @@ then we get a 1-level LoDTensor: Example 3: -Given a 1-level LoDTensor input(X): +Given a 1-level phi::DenseTensor input(X): X.lod = [[ 0, 2, 5, 6 ]] X.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] X.dims = [6, 1] diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index f3c26a9121d63f883ac035268c22082158bd0318..a736385a1401e35ccab6718ef4b646678cce399f 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -125,11 +125,11 @@ class LoDTensorToArrayOp : public framework::OperatorBase { PADDLE_ENFORCE_LT( rank_level, x.lod().size(), - platform::errors::InvalidArgument( - "Input should be a LoDTensor, and its lod_level should be at " - "least %d, but given is %d.", - rank_level + 1, - x.lod().size())); + platform::errors::InvalidArgument("Input should be a phi::DenseTensor, " + "and its lod_level should be at " + "least %d, but given is %d.", + rank_level + 1, + x.lod().size())); out.resize(max_seq_len); std::vector> copy_ranges(max_seq_len); @@ -189,14 +189,15 @@ class LoDTensorToArrayOp : public framework::OperatorBase { class LoDTensorToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", - "(LoDTensor), the input lod tensor is a minibatch of sequences, " - "and will be split to a tensor_array according to " - "Input(RankTable)."); + AddInput( + "X", + "(phi::DenseTensor), the input lod tensor is a minibatch of sequences, " - "and will be split to a tensor_array according to " + "and will be split to a tensor_array according to " + "Input(RankTable)."); AddInput("RankTable", "(LoDRankTable), the rank table."); AddOutput("Out", "(LoDTensorArray), the result tensor_array, which is actually a " - "std::vector."); + "std::vector."); AddComment(R"DOC(LoDTensorToArray operator. Input(X) is a minibatch of sequences. Input(RankTable) stores the order of the input sequences. The lod_tensor_to_array operator will split the input sequences into a tensor_array, with each @@ -234,9 +235,9 @@ class LoDTensorToArrayInferShape : public framework::InferShapeBase { // kernel implementation. context->SetOutputDim("Out", x_dim); - // The output LoDTensor's lod_level should be input X's lod_level - 1. - // For compile time, we call SetLoDLevel to set output's lod_level. - // For runtime, output LoDTensor's lod is determined by input X's lod and + // The output phi::DenseTensor's lod_level should be input X's lod_level + // - 1. For compile time, we call SetLoDLevel to set output's lod_level. For + // runtime, output phi::DenseTensor's lod is determined by input X's lod and // the level specified by input RankTable. // We cannot get X's detail lod and RankTable's level in this function, so // leave this work to the detail kernel implementation.
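The three LoDReset examples earlier all reduce to one operation; a tiny sketch with illustrative types (the target offsets below are made-up values, since each example's target_lod is elided here):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Sketch of lod_reset's core semantics (illustrative, not the Paddle
// kernel): the data and dims of X pass through untouched; only the
// level-0 offset vector is replaced. A valid target must start at 0,
// be non-decreasing, and end at the row count.
std::vector<size_t> LoDReset(size_t rows, const std::vector<size_t>& target) {
  assert(!target.empty() && target.front() == 0 && target.back() == rows);
  for (size_t i = 1; i < target.size(); ++i)
    assert(target[i - 1] <= target[i]);
  return target;  // becomes Out.lod[0]
}

int main() {
  // X.dims = [6, 1] as in the examples; {0, 4, 6} is a made-up target.
  std::vector<size_t> out_lod = LoDReset(6, {0, 4, 6});
  assert(out_lod.size() == 3);
  return 0;
}
```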
diff --git a/paddle/fluid/operators/lookup_table_dequant_op.h b/paddle/fluid/operators/lookup_table_dequant_op.h index 05d74855f5d477ad3d390e8fcf573b45fb017483..3f9ec485ce4f819ed9f26d3d4ce3b5e7b3717c85 100644 --- a/paddle/fluid/operators/lookup_table_dequant_op.h +++ b/paddle/fluid/operators/lookup_table_dequant_op.h @@ -28,7 +28,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using SelectedRows = phi::SelectedRows; using DDim = framework::DDim; @@ -52,8 +51,8 @@ template class LookupTableDequantKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { - auto *ids_t = context.Input("Ids"); // int tensor - auto *output_t = context.Output("Out"); // float tensor + auto *ids_t = context.Input("Ids"); // int tensor + auto *output_t = context.Output("Out"); // float tensor auto *table_var = context.InputVar("W"); auto id_name = context.InputNames("Ids").front(); @@ -66,9 +65,9 @@ class LookupTableDequantKernel : public framework::OpKernel { PADDLE_ENFORCE_GE( table_var->Type(), - framework::VarTypeTrait::kId, + framework::VarTypeTrait::kId, platform::errors::InvalidArgument("lookup table must be LodTensor")); - auto *table_t = context.Input("W"); + auto *table_t = context.Input("W"); int64_t row_number = table_t->dims()[0]; int64_t quant_number = table_t->dims()[1]; int64_t row_width = (quant_number - 2) * 4; diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 02cd9a205f009d6c2b71ac772b15946863515f53..8ad3966a1d236f96cbab75d3031c20d405eab32f 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -212,7 +212,7 @@ class LookupTableOpGradVarTypeInference : public framework::VarTypeInference { framework::proto::VarType::SELECTED_ROWS); } else { VLOG(3) << "lookup_table_grad op " << framework::GradVarName("W") - << " is set to LoDTensor"; + << " is set to phi::DenseTensor"; ctx->SetOutputType(out_var_name, framework::proto::VarType::LOD_TENSOR); } ctx->SetOutputDataType(out_var_name, ctx->GetInputDataType("W")); diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 0562228f516fac15ac6b08624ccba1ea6e2a7e44..1052e5117e434e0974da93801d0153359ea9e3da 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -103,9 +103,9 @@ template class LookupTableCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { - auto *table_t = context.Input("W"); - auto *ids_t = context.Input("Ids"); - auto *output_t = context.Output("Out"); + auto *table_t = context.Input("W"); + auto *ids_t = context.Input("Ids"); + auto *output_t = context.Output("Out"); int64_t padding_idx = context.Attr("padding_idx"); auto id_name = context.InputNames("Ids").front(); @@ -157,9 +157,10 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. 
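The dequant kernel above only shows that a stored row of quant_number values expands to (quant_number - 2) * 4 floats. The sketch below fills in the rest under loudly stated assumptions: the first two stored floats are taken to be a [min, max] pair and every remaining float to pack four uint8 codes, with code c mapping to min + (max - min) * c / 255; the real kernel's layout and scale may differ.

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Hedged sketch of a min/max byte-quantized embedding row, consistent
// with `row_width = (quant_number - 2) * 4` above but with an ASSUMED
// layout: row[0] = min, row[1] = max, and each later float packs four
// uint8 codes that dequantize affinely over [min, max].
std::vector<float> DequantRow(const std::vector<float>& row) {
  float mn = row[0], mx = row[1];
  std::vector<float> out;
  out.reserve((row.size() - 2) * 4);
  for (size_t i = 2; i < row.size(); ++i) {
    uint8_t codes[4];
    std::memcpy(codes, &row[i], 4);  // reinterpret one float as 4 codes
    for (uint8_t c : codes) out.push_back(mn + (mx - mn) * c / 255.0f);
  }
  return out;
}

int main() {
  float packed;
  uint8_t codes[4] = {0, 85, 170, 255};
  std::memcpy(&packed, codes, 4);
  for (float v : DequantRow({-1.0f, 1.0f, packed}))
    std::cout << v << ' ';  // approx: -1 -0.33 0.33 1
}
```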
if (is_sparse) { - auto *ids = context.Input("Ids"); - auto *table = context.Input("W"); - auto *d_output = context.Input(framework::GradVarName("Out")); + auto *ids = context.Input("Ids"); + auto *table = context.Input("W"); + auto *d_output = + context.Input(framework::GradVarName("Out")); auto *d_table = context.Output(framework::GradVarName("W")); @@ -209,9 +210,11 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { stream); } else { - auto ids_t = context.Input("Ids"); - auto d_output_t = context.Input(framework::GradVarName("Out")); - auto d_table_t = context.Output(framework::GradVarName("W")); + auto ids_t = context.Input("Ids"); + auto d_output_t = + context.Input(framework::GradVarName("Out")); + auto d_table_t = + context.Output(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index cfd34cfe67848f13b41f40a65c079ddfd096a195..1ba6d6e31ecdcb4e52742307cefbdd7fc4e4872c 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -27,7 +27,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using SelectedRows = phi::SelectedRows; using DDim = framework::DDim; @@ -37,8 +36,8 @@ template class LookupTableKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { - auto *ids_t = context.Input("Ids"); // int tensor - auto *output_t = context.Output("Out"); // float tensor + auto *ids_t = context.Input("Ids"); // int tensor + auto *output_t = context.Output("Out"); // float tensor auto *table_var = context.InputVar("W"); auto id_name = context.InputNames("Ids").front(); @@ -51,8 +50,8 @@ class LookupTableKernel : public framework::OpKernel { int64_t *ids = const_cast(ids_t->data()); int64_t ids_numel = ids_t->numel(); - if (table_var->IsType()) { - auto *table_t = context.Input("W"); + if (table_var->IsType()) { + auto *table_t = context.Input("W"); int64_t row_number = table_t->dims()[0]; int64_t row_width = table_t->dims()[1]; @@ -165,15 +164,15 @@ class LookupTableGradKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &context) const override { auto *table_var = context.InputVar("W"); DDim table_dim; - if (table_var->IsType()) { - table_dim = context.Input("W")->dims(); + if (table_var->IsType()) { + table_dim = context.Input("W")->dims(); } else if (table_var->IsType()) { auto *table_t = context.Input("W"); table_dim = table_t->value().dims(); } else { PADDLE_THROW(platform::errors::InvalidArgument( "The parameter W of a LookupTable " - "must be either LoDTensor or SelectedRows")); + "must be either phi::DenseTensor or SelectedRows")); } int64_t padding_idx = context.Attr("padding_idx"); @@ -181,8 +180,9 @@ class LookupTableGradKernel : public framework::OpKernel { // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. 
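For the dense (non-sparse) path, the forward lookup itself is just a row gather with a zero row for padding_idx; a minimal sketch (illustrative, not the CUDA kernel):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Minimal sketch of the dense embedding lookup (illustrative): row
// ids[i] of the N x D table is copied to row i of the output, and a
// padding_idx position is emitted as zeros.
std::vector<float> LookupTable(const std::vector<float>& table,  // N x D
                               int64_t D,
                               const std::vector<int64_t>& ids,
                               int64_t padding_idx) {
  std::vector<float> out(ids.size() * D, 0.0f);
  for (size_t i = 0; i < ids.size(); ++i) {
    if (ids[i] == padding_idx) continue;  // row stays zero
    for (int64_t d = 0; d < D; ++d)
      out[i * D + d] = table[ids[i] * D + d];
  }
  return out;
}

int main() {
  std::vector<float> table = {1, 1, 2, 2, 3, 3};  // 3 x 2
  for (float v : LookupTable(table, 2, {2, 1, 0}, /*padding_idx=*/1))
    std::cout << v << ' ';  // prints: 3 3 0 0 1 1
}
```

The comment repeated in both kernels, that paddings are fixed in forward, is visible here: a padded position never reads the table, so no gradient can flow back to it.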
if (is_sparse) { - auto *ids = context.Input("Ids"); - auto *d_output = context.Input(framework::GradVarName("Out")); + auto *ids = context.Input("Ids"); + auto *d_output = + context.Input(framework::GradVarName("Out")); auto *d_table = context.Output(framework::GradVarName("W")); @@ -216,9 +216,11 @@ class LookupTableGradKernel : public framework::OpKernel { d_output_dims_2d)); memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel()); } else { - auto *ids = context.Input("Ids"); - auto *d_output = context.Input(framework::GradVarName("Out")); - auto *d_table = context.Output(framework::GradVarName("W")); + auto *ids = context.Input("Ids"); + auto *d_output = + context.Input(framework::GradVarName("Out")); + auto *d_table = + context.Output(framework::GradVarName("W")); auto *ids_data = ids->data(); diff --git a/paddle/fluid/operators/lookup_table_v2_op.cc b/paddle/fluid/operators/lookup_table_v2_op.cc index 5f023fbad6a027660916ca63a838d893101cf2bb..84f8c6cf6492a26f0495a051ec70623e501967df 100644 --- a/paddle/fluid/operators/lookup_table_v2_op.cc +++ b/paddle/fluid/operators/lookup_table_v2_op.cc @@ -156,7 +156,7 @@ class LookupTableV2OpGradVarTypeInference : public framework::VarTypeInference { framework::proto::VarType::SELECTED_ROWS); } else { VLOG(3) << "lookup_table_v2_grad op " << framework::GradVarName("W") - << " is set to LoDTensor"; + << " is set to phi::DenseTensor"; ctx->SetOutputType(out_var_name, framework::proto::VarType::LOD_TENSOR); } ctx->SetOutputDataType(out_var_name, ctx->GetInputDataType("W")); diff --git a/paddle/fluid/operators/lookup_table_v2_op.h b/paddle/fluid/operators/lookup_table_v2_op.h index e100ade31a0afaac42efbfca57e8b6ee466300be..e9369bcb475ccdd21a70e3773a66d47e29916954 100644 --- a/paddle/fluid/operators/lookup_table_v2_op.h +++ b/paddle/fluid/operators/lookup_table_v2_op.h @@ -28,7 +28,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using SelectedRows = phi::SelectedRows; using DDim = framework::DDim; @@ -57,7 +56,7 @@ struct LookupTableV2CPUFunctor { template void apply() { - auto *output_t = context_.Output("Out"); // float tensor + auto *output_t = context_.Output("Out"); // float tensor auto *table_var = context_.InputVar("W"); int64_t padding_idx = context_.Attr("padding_idx"); @@ -65,8 +64,8 @@ struct LookupTableV2CPUFunctor { auto ids = CopyIdsToVector(*ids_t_); auto ids_numel = static_cast(ids.size()); - if (table_var->template IsType()) { - const auto &table_t = table_var->template Get(); + if (table_var->template IsType()) { + const auto &table_t = table_var->template Get(); int64_t row_number = table_t.dims()[0]; int64_t row_width = table_t.dims()[1]; @@ -168,15 +167,15 @@ struct LookupTableV2GradCPUFunctor { void apply() { auto *table_var = context_.InputVar("W"); DDim table_dim; - if (table_var->template IsType()) { - table_dim = context_.Input("W")->dims(); + if (table_var->template IsType()) { + table_dim = context_.Input("W")->dims(); } else if (table_var->template IsType()) { auto *table_t = context_.Input("W"); table_dim = table_t->value().dims(); } else { PADDLE_THROW(platform::errors::InvalidArgument( "The parameter W of a LookupTableV2 " - "must be either LoDTensor or SelectedRows")); + "must be either phi::DenseTensor or SelectedRows")); } int64_t padding_idx = context_.Attr("padding_idx"); @@ -188,7 +187,8 @@ struct LookupTableV2GradCPUFunctor { // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we 
don't deal with it in backward. if (is_sparse) { - auto *d_output = context_.Input(framework::GradVarName("Out")); + auto *d_output = + context_.Input(framework::GradVarName("Out")); auto *d_table = context_.Output(framework::GradVarName("W")); @@ -219,8 +219,10 @@ struct LookupTableV2GradCPUFunctor { memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel()); } else { - auto *d_output = context_.Input(framework::GradVarName("Out")); - auto *d_table = context_.Output(framework::GradVarName("W")); + auto *d_output = + context_.Input(framework::GradVarName("Out")); + auto *d_table = + context_.Output(framework::GradVarName("W")); auto *ids_data = ids.data(); int64_t N = table_dim[0]; diff --git a/paddle/fluid/operators/lookup_table_v2_op_mlu.cc b/paddle/fluid/operators/lookup_table_v2_op_mlu.cc index 39e6cd984722bde48c33a785499ebe92fd0f7060..de9864aeee6a1664d7f8195804c82b021ec9470b 100644 --- a/paddle/fluid/operators/lookup_table_v2_op_mlu.cc +++ b/paddle/fluid/operators/lookup_table_v2_op_mlu.cc @@ -32,7 +32,7 @@ class LookupTableV2MLUKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ( table_var->IsType(), true, - platform::errors::InvalidArgument("mlu only accept LoDTensor")); + platform::errors::InvalidArgument("mlu only accept phi::DenseTensor")); output_t->mutable_data(ctx.GetPlace()); MLUCnnlTensorDesc ids_desc(*ids_t); @@ -55,11 +55,12 @@ class LookupTableV2GradMLUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { auto *table_var = ctx.InputVar("W"); - PADDLE_ENFORCE_EQ(table_var->IsType(), - true, - platform::errors::PermissionDenied( - "Unsupported Variable Type , idx in " - "LookupTableV2GradMLUKernel should be LoDTensor.")); + PADDLE_ENFORCE_EQ( + table_var->IsType(), + true, + platform::errors::PermissionDenied( + "Unsupported Variable Type , idx in " + "LookupTableV2GradMLUKernel should be phi::DenseTensor.")); bool is_sparse = ctx.Attr("is_sparse"); PADDLE_ENFORCE_EQ( is_sparse, diff --git a/paddle/fluid/operators/lookup_table_v2_op_npu.cc b/paddle/fluid/operators/lookup_table_v2_op_npu.cc index b8719d33b5d6c0662fac324c3269c257ef0eb718..d11ef440f8a3f4a6e7de30548ab107a24ab4def4 100644 --- a/paddle/fluid/operators/lookup_table_v2_op_npu.cc +++ b/paddle/fluid/operators/lookup_table_v2_op_npu.cc @@ -37,7 +37,7 @@ class LookupTableV2NPUKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ( table_var->IsType(), true, - platform::errors::InvalidArgument("npu only accept LoDTensor")); + platform::errors::InvalidArgument("npu only accept phi::DenseTensor")); output_t->mutable_data(ctx.GetPlace()); int64_t padding_idx = ctx.Attr("padding_idx"); diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc index ba56eeddf89d18a344c043f05c905ef4681e3d73..b7310ed475994cd5b6941f24a0dc77c6368181b0 100644 --- a/paddle/fluid/operators/lstm_op.cc +++ b/paddle/fluid/operators/lstm_op.cc @@ -146,11 +146,12 @@ class LSTMOp : public framework::OperatorWithKernel { class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("Input", - "(LoDTensor) the first input is a LodTensor, which support " - "variable-time length input sequence. The underlying tensor in " - "this LoDTensor is a matrix with shape (T X 4D), where T is the " - "total time steps in this mini-batch, D is the hidden size."); + AddInput( + "Input", + "(phi::DenseTensor) the first input is a phi::DenseTensor, which " + "support variable-time length input sequence. 
The underlying tensor in " + "this phi::DenseTensor is a matrix with shape (T X 4D), where T is the " + "total time steps in this mini-batch, D is the hidden size."); AddInput("H0", "(Tensor, optional) the initial hidden state is an optional " "input. This is a tensor with shape (N x D), where N is the " @@ -176,23 +177,26 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { " - The shape is (1 x 7D). " " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}."); AddOutput("Hidden", - "(LoDTensor) the hidden state of LSTM operator. " + "(phi::DenseTensor) the hidden state of LSTM operator. " "The shape is (T x D), and lod is the same with the `Input`."); AddOutput("Cell", - "(LoDTensor) the cell state of LSTM operator. " + "(phi::DenseTensor) the cell state of LSTM operator. " "The shape is (T x D), and lod is the same with the `Input`."); - AddOutput("BatchGate", - "(LoDTensor) This LoDTensor contains input gate, forget gate " - "and output gate after the nonlinear computation. This " - "LoDTensor has the same shape as the reorganized input, which " - "is also be called batch input. The LoD size is 2. The first " - "LoD is the batch offsets and the second LoD contains the " - "indexes, which denote the position of reorganized sequence " - "in the raw input.") + AddOutput( + "BatchGate", + "(phi::DenseTensor) This phi::DenseTensor contains input gate, forget " + "gate " + "and output gate after the nonlinear computation. This " + "phi::DenseTensor has the same shape as the reorganized input, which " + "is also be called batch input. The LoD size is 2. The first " + "LoD is the batch offsets and the second LoD contains the " + "indexes, which denote the position of reorganized sequence " + "in the raw input.") .AsIntermediate() .AsExtra(); AddOutput("BatchCellPreAct", - "(LoDTensor) This LoDTensor is obtained in the forward and used " + "(phi::DenseTensor) This phi::DenseTensor is obtained in the " + "forward and used " "in the backward.") .AsIntermediate() .AsExtra(); diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index a6bb901897416c4d5a8425b472853f7f0c6a0ae1..dc4f2f1548612d37100e595170d39b0977a32ced 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -24,7 +24,6 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template @@ -44,25 +43,25 @@ class LSTMKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { bool is_test = ctx.Attr("is_test"); - auto* input = ctx.Input("Input"); + auto* input = ctx.Input("Input"); auto* weight = ctx.Input("Weight"); auto* bias = ctx.Input("Bias"); auto* hidden_t0 = ctx.Input("H0"); auto* cell_t0 = ctx.Input("C0"); - LoDTensor* batch_gate = nullptr; - LoDTensor batch_gate_temp; + phi::DenseTensor* batch_gate = nullptr; + phi::DenseTensor batch_gate_temp; if (is_test) { batch_gate = &batch_gate_temp; batch_gate->Resize(input->dims()); } else { - batch_gate = ctx.Output("BatchGate"); + batch_gate = ctx.Output("BatchGate"); } batch_gate->mutable_data(ctx.GetPlace()); - auto* hidden_out = ctx.Output("Hidden"); + auto* hidden_out = ctx.Output("Hidden"); hidden_out->mutable_data(ctx.GetPlace()); - auto* cell_out = ctx.Output("Cell"); + auto* cell_out = ctx.Output("Cell"); cell_out->mutable_data(ctx.GetPlace()); bool is_reverse = ctx.Attr("is_reverse"); @@ -110,12 +109,12 @@ class LSTMKernel : public framework::OpKernel { } // Use the local variable as here. - LoDTensor batch_hidden, batch_cell, batch_cell_pre_act_temp; - LoDTensor* batch_cell_pre_act; + phi::DenseTensor batch_hidden, batch_cell, batch_cell_pre_act_temp; + phi::DenseTensor* batch_cell_pre_act; if (is_test) { batch_cell_pre_act = &batch_cell_pre_act_temp; } else { - batch_cell_pre_act = ctx.Output("BatchCellPreAct"); + batch_cell_pre_act = ctx.Output("BatchCellPreAct"); } batch_hidden.mutable_data(dims, ctx.GetPlace()); batch_cell.mutable_data(dims, ctx.GetPlace()); @@ -191,11 +190,11 @@ class LSTMKernel : public framework::OpKernel { phi::funcs::Batch2LoDTensorFunctor to_seq; batch_hidden.set_lod(batch_gate->lod()); - // restore the output hidden in LoDTensor from the batch hidden + // restore the output hidden in phi::DenseTensor from the batch hidden to_seq(device_ctx, batch_hidden, hidden_out); batch_cell.set_lod(batch_gate->lod()); - // restore the output cell state in LoDTensor from the batch cell + // restore the output cell state in phi::DenseTensor from the batch cell to_seq(device_ctx, batch_cell, cell_out); } }; @@ -204,19 +203,20 @@ template class LSTMGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* input = ctx.Input("Input"); + auto* input = ctx.Input("Input"); auto* weight = ctx.Input("Weight"); auto* bias = ctx.Input("Bias"); - auto* hidden_out = ctx.Input("Hidden"); - auto* cell_out = ctx.Input("Cell"); + auto* hidden_out = ctx.Input("Hidden"); + auto* cell_out = ctx.Input("Cell"); - auto* batch_gate = ctx.Input("BatchGate"); - auto* batch_cell_pre_act = ctx.Input("BatchCellPreAct"); + auto* batch_gate = ctx.Input("BatchGate"); + auto* batch_cell_pre_act = ctx.Input("BatchCellPreAct"); - auto* hidden_g = ctx.Input(framework::GradVarName("Hidden")); + auto* hidden_g = + ctx.Input(framework::GradVarName("Hidden")); - auto* in_g = ctx.Output(framework::GradVarName("Input")); + auto* in_g = ctx.Output(framework::GradVarName("Input")); auto* weight_g = ctx.Output(framework::GradVarName("Weight")); auto* bias_g = ctx.Output(framework::GradVarName("Bias")); @@ -301,12 +301,12 @@ class LSTMGradKernel : public framework::OpKernel { to_batch(ctx, src, &dst, false); }; - LoDTensor batch_hidden, batch_hidden_g, batch_cell; + phi::DenseTensor batch_hidden, 
batch_hidden_g, batch_cell; ToBatch(device_ctx, *hidden_out, out_dims, batch_hidden); ToBatch(device_ctx, *hidden_g, out_dims, batch_hidden_g); ToBatch(device_ctx, *cell_out, out_dims, batch_cell); - LoDTensor batch_cell_g, batch_gate_g; + phi::DenseTensor batch_cell_g, batch_gate_g; batch_cell_g.mutable_data(out_dims, ctx.GetPlace()); // TODO(qingqing) support the case output cell has gradient. // to_batch(device_ctx, *cell_g, batch_cell_g, false); diff --git a/paddle/fluid/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc index 156fc55fb6b9a89fdb610250ca8fede34b421993..dc36b3431d48912b9dfbb60f217bc0098ad6c1b3 100644 --- a/paddle/fluid/operators/lstmp_op.cc +++ b/paddle/fluid/operators/lstmp_op.cc @@ -154,11 +154,12 @@ class LSTMPOp : public framework::OperatorWithKernel { class LSTMPOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("Input", - "(LoDTensor) the input for sequence data, which supports " - "variable-time length input sequence. The underlying tensor in " - "this LoDTensor is a matrix with shape (T X 4D), where T is the " - "total time steps in this mini-batch, D is the hidden size."); + AddInput( + "Input", + "(phi::DenseTensor) the input for sequence data, which supports " + "variable-time length input sequence. The underlying tensor in " + "this phi::DenseTensor is a matrix with shape (T X 4D), where T is the " + "total time steps in this mini-batch, D is the hidden size."); AddInput("H0", "(Tensor, optional) the initial hidden state is an optional " "input. This is a tensor with shape (N x D), where N is the " @@ -190,29 +191,34 @@ class LSTMPOpMaker : public framework::OpProtoAndCheckerMaker { " - The shape is (1 x 7D). " " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}."); AddOutput("Projection", - "(LoDTensor) the projection of the hidden state of LSTMP " + "(phi::DenseTensor) the projection of the hidden state of LSTMP " "operator. The shape is (T x P), and LoD is the same with the " "`Input`."); AddOutput("Cell", - "(LoDTensor) the cell state of LSTMP operator. " + "(phi::DenseTensor) the cell state of LSTMP operator. " "The shape is (T x D), and lod is the same with the `Input`."); - AddOutput("BatchGate", - "(LoDTensor) This LoDTensor contains input gate, forget gate " - "and output gate after the activations. This LoDTensor has the " - "same shape as the reorganized input, which is also be called " - "batch input. The LoD size is 2. The first-level LoD is the " - "batch offsets and the second contains the indices, which " - "denotes the position of reorganized sequence in the raw input.") + AddOutput( + "BatchGate", + "(phi::DenseTensor) This phi::DenseTensor contains input gate, forget " + "gate " + "and output gate after the activations. This phi::DenseTensor has the " + "same shape as the reorganized input, which is also called " + "batch input. The LoD size is 2. The first-level LoD is the " + "batch offsets and the second contains the indices, which " + "denote the position of reorganized sequence in the raw input.") .AsIntermediate(); - AddOutput("BatchCellPreAct", - "(LoDTensor) the pre-activation cell state reorganized in batch. " - "This LoDTensor is obtained in the forward and used in the " - "backward.") + AddOutput( + "BatchCellPreAct", + "(phi::DenseTensor) the pre-activation cell state reorganized in " + "batch. 
" + "This phi::DenseTensor is obtained in the forward and used in the " + "backward.") .AsIntermediate(); - AddOutput("BatchHidden", - "(LoDTensor) the hidden state reorganized in batch. " - "This LoDTensor is obtained in the forward and used in the " - "backward.") + AddOutput( + "BatchHidden", + "(phi::DenseTensor) the hidden state reorganized in batch. " + "This phi::DenseTensor is obtained in the forward and used in the " + "backward.") .AsIntermediate(); AddAttr("use_peepholes", "(bool, default: True) " diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 23f3a14db837df2231d270b44f949aca2f1888e7..8056bf0bd49f2a8bbec63b14e72cfd55bcd74fbe 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -29,7 +29,6 @@ limitations under the License. */ namespace paddle { namespace operators { -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; using platform::Transform; @@ -107,7 +106,7 @@ class LSTMPKernel : public framework::OpKernel { } void Compute(const framework::ExecutionContext& ctx) const override { - auto* input = ctx.Input("Input"); + auto* input = ctx.Input("Input"); auto* weight = ctx.Input("Weight"); auto* proj_weight = ctx.Input("ProjWeight"); auto* bias = ctx.Input("Bias"); @@ -118,11 +117,11 @@ class LSTMPKernel : public framework::OpKernel { auto proj_clip = static_cast(ctx.Attr("proj_clip")); auto cell_clip = static_cast(ctx.Attr("cell_clip")); - auto* batch_gate = ctx.Output("BatchGate"); + auto* batch_gate = ctx.Output("BatchGate"); batch_gate->mutable_data(ctx.GetPlace()); - auto* proj_out = ctx.Output("Projection"); + auto* proj_out = ctx.Output("Projection"); proj_out->mutable_data(ctx.GetPlace()); - auto* cell_out = ctx.Output("Cell"); + auto* cell_out = ctx.Output("Cell"); cell_out->mutable_data(ctx.GetPlace()); bool is_reverse = ctx.Attr("is_reverse"); @@ -172,10 +171,10 @@ class LSTMPKernel : public framework::OpKernel { } // Use the local variable as here. 
- LoDTensor batch_proj, batch_cell; - auto* batch_cell_pre_act = ctx.Output("BatchCellPreAct"); + phi::DenseTensor batch_proj, batch_cell; + auto* batch_cell_pre_act = ctx.Output("BatchCellPreAct"); batch_cell_pre_act->mutable_data(dims, ctx.GetPlace()); - auto* batch_hidden = ctx.Output("BatchHidden"); + auto* batch_hidden = ctx.Output("BatchHidden"); batch_hidden->mutable_data(dims, ctx.GetPlace()); // T x D batch_proj.mutable_data(proj_dims, ctx.GetPlace()); // T x P batch_cell.mutable_data(dims, ctx.GetPlace()); // T x D @@ -272,11 +271,11 @@ class LSTMPKernel : public framework::OpKernel { phi::funcs::Batch2LoDTensorFunctor to_seq; batch_proj.set_lod(batch_gate->lod()); - // restore the output hidden in LoDTensor from the batch hidden + // restore the output hidden in phi::DenseTensor from the batch hidden to_seq(device_ctx, batch_proj, proj_out); batch_cell.set_lod(batch_gate->lod()); - // restore the output cell state in LoDTensor from the batch cell + // restore the output cell state in phi::DenseTensor from the batch cell to_seq(device_ctx, batch_cell, cell_out); } }; @@ -310,20 +309,20 @@ class LSTMPGradKernel : public framework::OpKernel { auto* proj_weight = ctx.Input("ProjWeight"); auto* bias = ctx.Input("Bias"); - auto* proj_out = ctx.Input("Projection"); - auto* cell_out = ctx.Input("Cell"); + auto* proj_out = ctx.Input("Projection"); + auto* cell_out = ctx.Input("Cell"); auto proj_clip = static_cast(ctx.Attr("proj_clip")); auto cell_clip = static_cast(ctx.Attr("cell_clip")); - auto* batch_gate = ctx.Input("BatchGate"); - auto* batch_cell_pre_act = ctx.Input("BatchCellPreAct"); - auto* batch_hidden = ctx.Input("BatchHidden"); + auto* batch_gate = ctx.Input("BatchGate"); + auto* batch_cell_pre_act = ctx.Input("BatchCellPreAct"); + auto* batch_hidden = ctx.Input("BatchHidden"); auto* projection_g = - ctx.Input(framework::GradVarName("Projection")); + ctx.Input(framework::GradVarName("Projection")); - auto* in_g = ctx.Output(framework::GradVarName("Input")); + auto* in_g = ctx.Output(framework::GradVarName("Input")); auto* weight_g = ctx.Output(framework::GradVarName("Weight")); auto* proj_weight_g = @@ -415,13 +414,13 @@ class LSTMPGradKernel : public framework::OpKernel { to_batch(ctx, src, &dst, false); }; - LoDTensor batch_hidden_g, batch_proj, batch_proj_g, batch_cell; + phi::DenseTensor batch_hidden_g, batch_proj, batch_proj_g, batch_cell; batch_hidden_g.mutable_data(out_dims, ctx.GetPlace()); ToBatch(device_ctx, *proj_out, proj_dims, batch_proj); // T x P ToBatch(device_ctx, *projection_g, proj_dims, batch_proj_g); // T x P ToBatch(device_ctx, *cell_out, out_dims, batch_cell); // T x D - LoDTensor batch_cell_g, batch_gate_g; + phi::DenseTensor batch_cell_g, batch_gate_g; batch_cell_g.mutable_data(out_dims, ctx.GetPlace()); // TODO(qingqing) support the case output cell has gradient. // to_batch(device_ctx, *cell_g, batch_cell_g, false); diff --git a/paddle/fluid/operators/match_matrix_tensor_op.cc b/paddle/fluid/operators/match_matrix_tensor_op.cc index 07dfde8d0d4123d131b440c115320a3bbe48aa6c..facf44725e2b6f5278e3ca56c2b99a6fce0c85ed 100644 --- a/paddle/fluid/operators/match_matrix_tensor_op.cc +++ b/paddle/fluid/operators/match_matrix_tensor_op.cc @@ -25,7 +25,6 @@ limitations under the License. 
*/ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using LoD = framework::LoD; void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const { @@ -92,7 +91,7 @@ void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const { if (ctx->IsRuntime()) { framework::Variable* x_var = PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); - const auto& x_lod = x_var->Get().lod(); + const auto& x_lod = x_var->Get().lod(); PADDLE_ENFORCE_EQ(x_lod.empty(), false, platform::errors::InvalidArgument( @@ -117,7 +116,7 @@ void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const { framework::Variable* y_var = PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("Y")[0]); - const auto& y_lod = y_var->Get().lod(); + const auto& y_lod = y_var->Get().lod(); PADDLE_ENFORCE_EQ(y_lod.empty(), false, platform::errors::InvalidArgument( @@ -213,18 +212,22 @@ void MatchMatrixTensorOpGrad::InferShape( void MatchMatrixTensorOpMaker::Make() { AddInput("X", - "X (LoDTensor, default LoDTensor) Input variable which " + "X (phi::DenseTensor, default phi::DenseTensor) Input " + "variable which " "should contain lod information."); AddInput("Y", - "Y (LoDTensor, default LoDTensor) Input variable which " + "Y (phi::DenseTensor, default phi::DenseTensor) Input " + "variable which " "should contain lod information."); AddInput("W", "W (Tensor), The weight of X and Y."); AddAttr("dim_t", "the dim of W").SetDefault(1); AddOutput("Out", - "(LoDTensor, default LoDTensor) Output variable which " + "(phi::DenseTensor, default phi::DenseTensor) Output " + "variable which " "is X * W * Y"); AddOutput("Tmp", - "(LoDTensor, default LoDTensor) tmp variable which is " + "(phi::DenseTensor, default phi::DenseTensor) tmp variable " + "which is " "used for X * W"); AddComment(R"DOC( Match Matrix Tensor Operator @@ -242,11 +245,11 @@ template class CPUMatchMatrixTensorOPKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); auto* w = ctx.Input("W"); - auto* out = ctx.Output("Out"); - auto* tmp = ctx.Output("Tmp"); + auto* out = ctx.Output("Out"); + auto* tmp = ctx.Output("Tmp"); int dim_t = ctx.Attr("dim_t"); int64_t dim_in = x->dims()[1]; @@ -322,10 +325,10 @@ template class CPUMatchMatrixTensorOPGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); auto* w = ctx.Input("W"); - auto* tmp = ctx.Input("Tmp"); + auto* tmp = ctx.Input("Tmp"); int dim_t = ctx.Attr("dim_t"); int64_t dim_in = x->dims()[1]; @@ -346,9 +349,9 @@ class CPUMatchMatrixTensorOPGradKernel : public framework::OpKernel { auto* bottom_r_data = y->data(); auto* bottom_l_trans_data = tmp->data(); - auto* d_out = ctx.Input(framework::GradVarName("Out")); - auto* d_x = ctx.Output(framework::GradVarName("X")); - auto* d_y = ctx.Output(framework::GradVarName("Y")); + auto* d_out = ctx.Input(framework::GradVarName("Out")); + auto* d_x = ctx.Output(framework::GradVarName("X")); + auto* d_y = ctx.Output(framework::GradVarName("Y")); Tensor tmp_grad; tmp_grad.Resize(tmp->dims()); diff --git a/paddle/fluid/operators/memcpy_d2h_op.cc b/paddle/fluid/operators/memcpy_d2h_op.cc index 
80181779ab3477fce24e1a22b709c178197c4f5b..82feee0f695db5b5e18968a934593af9ac700775 100644 --- a/paddle/fluid/operators/memcpy_d2h_op.cc +++ b/paddle/fluid/operators/memcpy_d2h_op.cc @@ -83,9 +83,9 @@ class MemcpyD2HKernel { class MemcpyD2HOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", "(LoDTensor) The input variable "); + AddInput("X", "(phi::DenseTensor) The input variable "); AddOutput("Out", - "(LoDTensor) The type of output " + "(phi::DenseTensor) The type of output " "is the same as input X."); AddAttr( "dst_place_type", @@ -98,7 +98,7 @@ class MemcpyD2HOpProtoMaker : public framework::OpProtoAndCheckerMaker { MemcpyD2H Operator. By now, it ONLY supports the memcopy between NPUPlace/CUDAPlace <-> CUDAPinnedPlace/CPU. You would have to update it if you want other more capacities. -Out = X, when type in [LoDTensor] +Out = X, when type in [phi::DenseTensor] raise error if the type is not listed above. )DOC"); } diff --git a/paddle/fluid/operators/memcpy_h2d_op.cc b/paddle/fluid/operators/memcpy_h2d_op.cc index 8d2cfcff80768ea29732f4e6312bb6c394d658b2..1426b23dc1b6640fde39e9d0df4dbc11914084a0 100644 --- a/paddle/fluid/operators/memcpy_h2d_op.cc +++ b/paddle/fluid/operators/memcpy_h2d_op.cc @@ -84,9 +84,9 @@ class MemcpyH2DKernel { class MemcpyH2DOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", "(LoDTensor) The input variable "); + AddInput("X", "(phi::DenseTensor) The input variable "); AddOutput("Out", - "(LoDTensor) The type of output " + "(phi::DenseTensor) The type of output " "is the same as input X."); AddAttr("dst_place_type", "Determine the dst place of tensor copy. " @@ -100,7 +100,7 @@ class MemcpyH2DOpProtoMaker : public framework::OpProtoAndCheckerMaker { MemcpyD2H Operator. By now, it ONLY supports the memcopy between CUDAPinnedPlace/CPU <-> NPUPlace/CUDAPlace. You would have to update it if you want other more capacities. -Out = X, when type in [LoDTensor] +Out = X, when type in [phi::DenseTensor] raise error if the type is not listed above. )DOC"); } diff --git a/paddle/fluid/operators/memcpy_op.cc b/paddle/fluid/operators/memcpy_op.cc index caa4164ee5bc0c1adedff51ad9994a5bdc364685..66cf6a00b7af43be56df3f2715ae9ed8e502e7c8 100644 --- a/paddle/fluid/operators/memcpy_op.cc +++ b/paddle/fluid/operators/memcpy_op.cc @@ -100,9 +100,9 @@ class MemcpyKernel { class MemcpyOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", "(LoDTensor) The input variable "); + AddInput("X", "(phi::DenseTensor) The input variable "); AddOutput("Out", - "(LoDTensor) The type of output " + "(phi::DenseTensor) The type of output " "is the same as input X."); AddAttr("dst_place_type", "Determine the dst place of tensor copy. " @@ -122,7 +122,7 @@ class MemcpyOpProtoMaker : public framework::OpProtoAndCheckerMaker { NPUPlace <-> CPUPlace, and used as an internal op by Recompute-Offload. You would have to update it if you want other more capacities. -Out = X, when type in [LoDTensor] +Out = X, when type in [phi::DenseTensor] raise error if the type is not listed above. 
)DOC"); } diff --git a/paddle/fluid/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc index 5bd34010db922750b99a240973ee331e3e69603a..007f853f3243feb0245a4007502ef0f3a374d083 100644 --- a/paddle/fluid/operators/merge_lod_tensor_op.cc +++ b/paddle/fluid/operators/merge_lod_tensor_op.cc @@ -104,7 +104,7 @@ class MergeLoDTensorOp : public framework::OperatorBase { out_lod->clear(); size_t out_offset = 0; - // Build LoDTensor `out` + // Build phi::DenseTensor `out` size_t in_true_idx = 0; size_t in_false_idx = 0; @@ -182,18 +182,18 @@ class MergeLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("X", - "The input LoDTensor, contains complete lod information to " + "The input phi::DenseTensor, contains complete lod information to " "construct the output"); AddInput("Mask", "A bool column vector which mask the input"); AddInput("InTrue", "The True branch to be merged"); AddInput("InFalse", "The False branch to be merged"); - AddOutput("Out", "The merged output LoDTensor"); + AddOutput("Out", "The merged output phi::DenseTensor"); AddAttr("level", "(int) the specific lod level to rank.") .SetDefault(0) .EqualGreaterThan(0); AddComment( R"DOC( - Merge True and False branches of LoDTensor into a single Output, + Merge True and False branches of phi::DenseTensor into a single Output, with a mask at certain lod level. X is used to obtain complete lod information. Please refer to SplitLoDTensorOp.)DOC"); } diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc index 9badb74988bc4b0d09891b6b6b3bf33f79c4c6af..b80de062796a0557f20f8218be5bca23d8532cd5 100644 --- a/paddle/fluid/operators/nce_op.cc +++ b/paddle/fluid/operators/nce_op.cc @@ -300,7 +300,7 @@ class NCEOpGradVarTypeInference : public framework::VarTypeInference { ctx->SetOutputType(weight_grad, framework::proto::VarType::SELECTED_ROWS); } else { VLOG(3) << "nce_op_grad op " << weight_grad << " and " - << " is set to LoDTensor"; + << " is set to phi::DenseTensor"; ctx->SetOutputType(weight_grad, framework::proto::VarType::LOD_TENSOR); } ctx->SetOutputDataType(weight_grad, ctx->GetInputDataType("Input")); diff --git a/paddle/fluid/operators/nce_op.h b/paddle/fluid/operators/nce_op.h index a699f81b827e5b1a6468e3d0aab54f1f6b144569..a4b418b14cc84b4c8045465460ed85fd5a5808e7 100644 --- a/paddle/fluid/operators/nce_op.h +++ b/paddle/fluid/operators/nce_op.h @@ -32,7 +32,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using SelectedRows = phi::SelectedRows; using Sampler = math::Sampler; using DDim = framework::DDim; @@ -395,15 +394,15 @@ class NCEGradKernel : public framework::OpKernel { auto *table_var = context.InputVar("Weight"); DDim table_dim; - if (table_var->IsType()) { - table_dim = context.Input("Weight")->dims(); + if (table_var->IsType()) { + table_dim = context.Input("Weight")->dims(); } else if (table_var->IsType()) { auto *table_t = context.Input("Weight"); table_dim = table_t->value().dims(); } else { PADDLE_THROW(platform::errors::InvalidArgument( "The parameter Weight of a NCE_OP " - "must be either LoDTensor or SelectedRows")); + "must be either phi::DenseTensor or SelectedRows")); } auto d_w = diff --git a/paddle/fluid/operators/number_count_op.cu b/paddle/fluid/operators/number_count_op.cu index 25541ebdb36217990f270f8d92afde5021390a95..99623917d59ee3d871c0e103077781f31de88a26 100644 --- a/paddle/fluid/operators/number_count_op.cu +++ 
b/paddle/fluid/operators/number_count_op.cu @@ -37,7 +37,6 @@ static inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template @@ -86,9 +85,9 @@ template class NumberCountOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto numbers = context.Input("numbers"); + auto numbers = context.Input("numbers"); auto upper_range = context.Attr("upper_range"); - auto number_count = context.Output("Out"); + auto number_count = context.Output("Out"); int64_t batch_size = numbers->numel(); auto place = context.GetPlace(); diff --git a/paddle/fluid/operators/one_hot_op.cc b/paddle/fluid/operators/one_hot_op.cc index 8e1a07975e2dac36e338a74fd6b8d2f1efd497ec..0cd6cab49eb11e641551fd6f981c6c1a701af916 100644 --- a/paddle/fluid/operators/one_hot_op.cc +++ b/paddle/fluid/operators/one_hot_op.cc @@ -79,7 +79,8 @@ class OneHotOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("X", - "(LoDTensor, LoDTensor) Input variable with rank at least 2. " + "(phi::DenseTensor, phi::DenseTensor) Input variable with " + "rank at least 2. " "The last dimension of X should be 1. Each value of X is an index " "to indicate the position."); AddInput("depth_tensor", "(Tensor, Tensor), Length of one-hot vector") diff --git a/paddle/fluid/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu index b36ca97b3e40f9b0fb3b0c480852b689cb2e6bab..917fa857e07782a5bbb003e5185ef5132bd45cd4 100644 --- a/paddle/fluid/operators/one_hot_op.cu +++ b/paddle/fluid/operators/one_hot_op.cu @@ -60,13 +60,12 @@ struct OneHotOpCUDAFunctor { } }; -using LoDTensor = phi::DenseTensor; template class OneHotCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* in = context.Input("X"); - auto* out = context.Output("Out"); + auto* in = context.Input("X"); + auto* out = context.Output("Out"); int depth = -1; if (context.HasInput("depth_tensor")) { diff --git a/paddle/fluid/operators/one_hot_op.h b/paddle/fluid/operators/one_hot_op.h index b8eb1c046d59d0392ae217564a715ddc16a9be70..d878fd5a6d44bbe8d890402de4dc5003e21f9dc6 100644 --- a/paddle/fluid/operators/one_hot_op.h +++ b/paddle/fluid/operators/one_hot_op.h @@ -76,14 +76,13 @@ struct OneHotOpFunctor { } }; -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template class OneHotKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* in = context.Input("X"); - auto* out = context.Output("Out"); + auto* in = context.Input("X"); + auto* out = context.Output("Out"); int depth = context.Attr("depth"); bool allow_out_of_range = context.Attr("allow_out_of_range"); if (context.HasInput("depth_tensor")) { diff --git a/paddle/fluid/operators/one_hot_op_npu.cc b/paddle/fluid/operators/one_hot_op_npu.cc index 2ca74cac0a051d1708f13118f46d5b12326266ac..e2997dc079c61c840155d550d47a4d7f2ff762b9 100644 --- a/paddle/fluid/operators/one_hot_op_npu.cc +++ b/paddle/fluid/operators/one_hot_op_npu.cc @@ -25,8 +25,8 @@ class OneHotNPUKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context(); - auto* in = ctx.Input("X"); - auto* out = ctx.Output("Out"); + auto* in = ctx.Input("X"); + auto* out = ctx.Output("Out"); int depth = 
ctx.Attr("depth"); if (ctx.HasInput("depth_tensor")) { diff --git a/paddle/fluid/operators/one_hot_op_xpu.cc b/paddle/fluid/operators/one_hot_op_xpu.cc index afa7104e9175a82f46577f8999a72c476c265118..66826cd4ff33a39a8921500415e8b08308481fd0 100644 --- a/paddle/fluid/operators/one_hot_op_xpu.cc +++ b/paddle/fluid/operators/one_hot_op_xpu.cc @@ -22,15 +22,14 @@ namespace paddle { namespace operators { -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template class OneHotXPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - const auto* in = context.Input("X"); - auto* out = context.Output("Out"); + const auto* in = context.Input("X"); + auto* out = context.Output("Out"); // get depth from attr int depth = context.Attr("depth"); diff --git a/paddle/fluid/operators/one_hot_v2_op.cc b/paddle/fluid/operators/one_hot_v2_op.cc index 55cb5d1a53b2fc3893e58fe3697ef7bbd2a2e7a7..f5b55fcf0275a2cf080eae22836c85c8293ff04b 100644 --- a/paddle/fluid/operators/one_hot_v2_op.cc +++ b/paddle/fluid/operators/one_hot_v2_op.cc @@ -52,7 +52,8 @@ class OneHotV2OpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("X", - "(LoDTensor, LoDTensor) Input variable with rank at least 2. " + "(phi::DenseTensor, phi::DenseTensor) Input variable with " + "rank at least 2. " "The last dimension of X should be 1. Each value of X is an index " "to indicate the position."); AddInput("depth_tensor", "(Tensor, Tensor), Length of one-hot vector") diff --git a/paddle/fluid/operators/one_hot_v2_op_mlu.cc b/paddle/fluid/operators/one_hot_v2_op_mlu.cc index 1b7ce8f243b6f31a24ff498a9f7d2fd38a123baf..f98cbabf58a87a3072c7c0fc7e5efc12f2b18040 100644 --- a/paddle/fluid/operators/one_hot_v2_op_mlu.cc +++ b/paddle/fluid/operators/one_hot_v2_op_mlu.cc @@ -20,7 +20,6 @@ limitations under the License. */ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; template class OneHotV2MLUKernel : public framework::OpKernel { @@ -28,8 +27,8 @@ class OneHotV2MLUKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context(); - auto* in = ctx.Input("X"); - auto* out = ctx.Output("Out"); + auto* in = ctx.Input("X"); + auto* out = ctx.Output("Out"); int depth = ctx.Attr("depth"); if (ctx.HasInput("depth_tensor")) { std::vector depth_data; diff --git a/paddle/fluid/operators/one_hot_v2_op_npu.cc b/paddle/fluid/operators/one_hot_v2_op_npu.cc index 01ab76ab5ccd3e343c88bb140e257906096787fc..8cc97b417ca78ecda9a9f0d95c3b3d55c3514a50 100644 --- a/paddle/fluid/operators/one_hot_v2_op_npu.cc +++ b/paddle/fluid/operators/one_hot_v2_op_npu.cc @@ -18,7 +18,6 @@ limitations under the License. 
*/ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; template class OneHotV2NPUKernel : public framework::OpKernel { @@ -26,8 +25,8 @@ class OneHotV2NPUKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.template device_context(); - auto* in = ctx.Input("X"); - auto* out = ctx.Output("Out"); + auto* in = ctx.Input("X"); + auto* out = ctx.Output("Out"); int depth = ctx.Attr("depth"); if (ctx.HasInput("depth_tensor")) { diff --git a/paddle/fluid/operators/partial_concat_op.cu b/paddle/fluid/operators/partial_concat_op.cu index 01bf11d2ea70557cd83996aa09e718a109404141..ef52bbad525a4a4ba659de70e5b97dab6128246b 100644 --- a/paddle/fluid/operators/partial_concat_op.cu +++ b/paddle/fluid/operators/partial_concat_op.cu @@ -23,7 +23,6 @@ namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template @@ -154,8 +153,8 @@ class PartialConcatGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { auto *out_grad = ctx.Input(framework::GradVarName("Out")); - auto ins = ctx.MultiInput("X"); - auto outs = ctx.MultiOutput(framework::GradVarName("X")); + auto ins = ctx.MultiInput("X"); + auto outs = ctx.MultiOutput(framework::GradVarName("X")); PADDLE_ENFORCE_EQ(ins[0] != nullptr, true, diff --git a/paddle/fluid/operators/partial_sum_op.cu b/paddle/fluid/operators/partial_sum_op.cu index f427ab4e3f2d24b67827596693d27dfbec25a545..c92e9618bfce02a2a2e26507d5b478a4085eb428 100644 --- a/paddle/fluid/operators/partial_sum_op.cu +++ b/paddle/fluid/operators/partial_sum_op.cu @@ -23,7 +23,6 @@ namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template @@ -153,8 +152,8 @@ class PartialSumGradOpCUDAKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &ctx) const override { const Tensor *out_grad = ctx.Input(framework::GradVarName("Out")); - auto ins = ctx.MultiInput("X"); - auto outs = ctx.MultiOutput(framework::GradVarName("X")); + auto ins = ctx.MultiInput("X"); + auto outs = ctx.MultiOutput(framework::GradVarName("X")); PADDLE_ENFORCE_EQ( ins[0] != nullptr, diff --git a/paddle/fluid/operators/positive_negative_pair_op.h b/paddle/fluid/operators/positive_negative_pair_op.h index 439a02e37d6cfbbf39d13d098644d265b6469176..1cc89cda21bc79e7075aa3141c6912b4ffc89106 100644 --- a/paddle/fluid/operators/positive_negative_pair_op.h +++ b/paddle/fluid/operators/positive_negative_pair_op.h @@ -20,7 +20,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; template class PositiveNegativePairKernel : public framework::OpKernel { diff --git a/paddle/fluid/operators/prroi_pool_op.cc b/paddle/fluid/operators/prroi_pool_op.cc index 57c80fc1fa43260333a75c02768b9eb68500781f..9b3146c3b848754c5577f659f9ec06e3d7129550 100644 --- a/paddle/fluid/operators/prroi_pool_op.cc +++ b/paddle/fluid/operators/prroi_pool_op.cc @@ -20,7 +20,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; class PRROIPoolOpMaker : public framework::OpProtoAndCheckerMaker { public: @@ -33,9 +32,9 @@ class PRROIPoolOpMaker : public framework::OpProtoAndCheckerMaker { "H is the height of the input feature map, and " "W is the width."); 
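The partial_concat and partial_sum hunks above apply the same rename to MultiInput/MultiOutput, which hand back one typed pointer per duplicable input. A toy context showing why only the template argument has to change (ToyContext and its members are invented for illustration):

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    namespace phi { struct DenseTensor { int id = 0; }; }

    struct ToyContext {
      std::map<std::string, std::vector<phi::DenseTensor>> inputs;
      // Returns one typed pointer per tensor registered under the name.
      template <typename T>
      std::vector<const T*> MultiInput(const std::string& name) const {
        std::vector<const T*> result;
        for (const auto& t : inputs.at(name)) result.push_back(&t);
        return result;
      }
    };

    int main() {
      ToyContext ctx;
      ctx.inputs["X"] = {phi::DenseTensor{1}, phi::DenseTensor{2}};
      auto ins = ctx.MultiInput<phi::DenseTensor>("X");
      assert(ins.size() == 2 && ins[1]->id == 2);
      return 0;
    }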
AddInput("ROIs", - "(LoDTensor), " + "(phi::DenseTensor), " "ROIs (Regions of Interest) to pool over. " - "should be a 2-D LoDTensor of shape (num_rois, 4) " + "should be a 2-D phi::DenseTensor of shape (num_rois, 4) " "given as [(x1, y1, x2, y2), ...]. " "where (x1, y1) is the top left coordinates, and " "(x2, y2) is the bottom right coordinates. " @@ -95,13 +94,13 @@ class PRROIPoolOp : public framework::OperatorWithKernel { rois_dims.size(), 2, platform::errors::InvalidArgument( - "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) " + "ROIs should be a 2-D phi::DenseTensor of shape (num_rois, 4) " "given as [(x1, y1, x2, y2), ...]")); PADDLE_ENFORCE_EQ( rois_dims[1], 4, platform::errors::InvalidArgument( - "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) " + "ROIs should be a 2-D phi::DenseTensor of shape (num_rois, 4) " "given as [(x1, y1, x2, y2), ...]")); int pooled_height = ctx->Attrs().Get("pooled_height"); int pooled_width = ctx->Attrs().Get("pooled_width"); diff --git a/paddle/fluid/operators/prroi_pool_op.cu b/paddle/fluid/operators/prroi_pool_op.cu index eaa0526174317af2a47011ccfa9d5cc68a7ec20b..b24ded79dd05017e4972165bc865c8d88eebfca2 100644 --- a/paddle/fluid/operators/prroi_pool_op.cu +++ b/paddle/fluid/operators/prroi_pool_op.cu @@ -18,7 +18,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; @@ -219,7 +218,7 @@ class GPUPRROIPoolOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out = ctx.Output("Out"); auto pooled_height = ctx.Attr("pooled_height"); @@ -322,7 +321,7 @@ class GPUPRROIPoolGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out = ctx.Input("Out"); auto* output_grad = @@ -330,7 +329,7 @@ class GPUPRROIPoolGradOpKernel : public framework::OpKernel { auto* input_grad = ctx.Output(framework::GradVarName("X")); auto* input_roi_grad = - ctx.Output(framework::GradVarName("ROIs")); + ctx.Output(framework::GradVarName("ROIs")); auto pooled_height = ctx.Attr("pooled_height"); auto pooled_width = ctx.Attr("pooled_width"); diff --git a/paddle/fluid/operators/prune_gate_by_capacity_op.cu b/paddle/fluid/operators/prune_gate_by_capacity_op.cu index 9f038002cfbe6e06e897a5869e3b806bda5710d5..cec8b15c4144626f05e6c4d32514f142bf25d0bb 100644 --- a/paddle/fluid/operators/prune_gate_by_capacity_op.cu +++ b/paddle/fluid/operators/prune_gate_by_capacity_op.cu @@ -30,7 +30,6 @@ DECLARE_bool(avoid_op_randomness); namespace paddle { namespace operators { -using LoDTensor = phi::DenseTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; @@ -111,10 +110,11 @@ template class PruneGateByCapacityCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* gate_idx = context.Input("GateIdx"); - auto* expert_count = context.Input("ExpertCount"); - // auto* expert_count_out = context.Output("ExpertCountOut"); - auto* new_gate_idx = context.Output("NewGateIdx"); + auto* gate_idx = context.Input("GateIdx"); + auto* expert_count = context.Input("ExpertCount"); + // auto* 
expert_count_out = + // context.Output("ExpertCountOut"); + auto* new_gate_idx = context.Output("NewGateIdx"); auto* new_gate_idx_data = new_gate_idx->mutable_data(context.GetPlace()); phi::DenseTensor expert_count_out; diff --git a/paddle/fluid/operators/psroi_pool_op.cc b/paddle/fluid/operators/psroi_pool_op.cc index e85e51d9ebebef2ed751ac498f3bcae4f02cd589..1222f97c091688d6d4f2f7404fd5a0c39aca7d5b 100644 --- a/paddle/fluid/operators/psroi_pool_op.cc +++ b/paddle/fluid/operators/psroi_pool_op.cc @@ -32,9 +32,9 @@ class PSROIPoolOpMaker : public framework::OpProtoAndCheckerMaker { "H is the height of the input feature map, and " "W is the width. The data type can be float32 or float64"); AddInput("ROIs", - "(LoDTensor), " + "(phi::DenseTensor), " "ROIs (Regions of Interest) to pool over. " - "should be a 2-D LoDTensor of shape (num_rois, 4) " + "should be a 2-D phi::DenseTensor of shape (num_rois, 4) " "given as [(x1, y1, x2, y2), ...]. " "where (x1, y1) is the top left coordinates, and " "(x2, y2) is the bottom right coordinates. " diff --git a/paddle/fluid/operators/pull_box_extended_sparse_op.h b/paddle/fluid/operators/pull_box_extended_sparse_op.h index 7da62ca9711a313ca5872d45701b57ba2273af8f..eff3bfd2a5f3c3bd721712e5cc82aba309a59632 100644 --- a/paddle/fluid/operators/pull_box_extended_sparse_op.h +++ b/paddle/fluid/operators/pull_box_extended_sparse_op.h @@ -108,7 +108,6 @@ static void PushBoxExtendedSparseFunctor( #endif } -using LoDTensor = phi::DenseTensor; template class PullBoxExtendedSparseCPUKernel : public framework::OpKernel { public: diff --git a/paddle/fluid/operators/pull_box_sparse_op.h b/paddle/fluid/operators/pull_box_sparse_op.h index c77cb440d08e31eb8d264f83d4adeab2facfe18a..dd41fd6ff0f4f262526b0887916adb14e7bafa32 100644 --- a/paddle/fluid/operators/pull_box_sparse_op.h +++ b/paddle/fluid/operators/pull_box_sparse_op.h @@ -113,7 +113,6 @@ static void PushBoxSparseFunctor(const framework::ExecutionContext &ctx) { #endif } -using LoDTensor = phi::DenseTensor; template class PullBoxSparseKernel : public framework::OpKernel { public: diff --git a/paddle/fluid/operators/pull_gpups_sparse_op.cu b/paddle/fluid/operators/pull_gpups_sparse_op.cu index d22c632d60dd257d21a1e9bbc857cedd45f313cc..ff68c42c8eb1b1fc7f8d8158975831c216248e10 100644 --- a/paddle/fluid/operators/pull_gpups_sparse_op.cu +++ b/paddle/fluid/operators/pull_gpups_sparse_op.cu @@ -19,7 +19,6 @@ namespace paddle { namespace operators { using phi::PADDLE_CUDA_NUM_THREADS; -using LoDTensor = phi::DenseTensor; template class PullGpuPSSparseCUDAKernel : public framework::OpKernel { diff --git a/paddle/fluid/operators/pull_gpups_sparse_op.h b/paddle/fluid/operators/pull_gpups_sparse_op.h index 7269ddd7d505a06330a72fa8374b5fb5f1bd21fd..2d844a4ce2bf09cf6e1f71345caf9216d2f20a67 100644 --- a/paddle/fluid/operators/pull_gpups_sparse_op.h +++ b/paddle/fluid/operators/pull_gpups_sparse_op.h @@ -97,7 +97,6 @@ static void PushGpuPSSparseFunctor(const framework::ExecutionContext &ctx) { #endif } -using LoDTensor = phi::DenseTensor; template class PullGpuPSSparseCPUKernel : public framework::OpKernel { public: diff --git a/paddle/fluid/operators/py_func_op.cc b/paddle/fluid/operators/py_func_op.cc index 474576f8f6809ee91385d8b0c612423807f51e07..2c5736d36689052dccdaf7bc0fcc03a2b089eb48 100644 --- a/paddle/fluid/operators/py_func_op.cc +++ b/paddle/fluid/operators/py_func_op.cc @@ -118,8 +118,8 @@ static void CallPythonFunc(py::object *callable, out->ShareDataWith(*py_out_tensor); } catch (py::cast_error &) { 
PADDLE_THROW(platform::errors::InvalidArgument( - "py::cast to LoDTensor error. The %d-th output expection is " - "LoDTensor", + "py::cast to phi::DenseTensor error. The %d-th output is expected to be " + "phi::DenseTensor", i)); } } diff --git a/paddle/fluid/operators/pyramid_hash_op.cc b/paddle/fluid/operators/pyramid_hash_op.cc index 44767dd7ab9987e4f46ccde6fd72e15ee37a4a59..5eead81365053f021c4f7c650aea3e7df051a47f 100644 --- a/paddle/fluid/operators/pyramid_hash_op.cc +++ b/paddle/fluid/operators/pyramid_hash_op.cc @@ -29,7 +29,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using LoD = framework::LoD; class PyramidHashOpMaker : public framework::OpProtoAndCheckerMaker { @@ -275,12 +274,12 @@ class CPUPyramidHashOPKernel : public framework::OpKernel { } void Compute(const framework::ExecutionContext& ctx) const override { - auto* bottom = ctx.Input<LoDTensor>("X"); + auto* bottom = ctx.Input<phi::DenseTensor>("X"); auto* _blobs_0 = ctx.Input("W"); auto* _blobs_1 = ctx.Input("WhiteList"); auto* _blobs_2 = ctx.Input("BlackList"); - auto* top = ctx.Output<LoDTensor>("Out"); - auto* drop_pos = ctx.Output<LoDTensor>("DropPos"); + auto* top = ctx.Output<phi::DenseTensor>("Out"); + auto* drop_pos = ctx.Output<phi::DenseTensor>("DropPos"); int _num_emb = ctx.Attr("num_emb"); bool use_filter = ctx.Attr("use_filter"); @@ -296,7 +295,7 @@ class CPUPyramidHashOPKernel : public framework::OpKernel { const auto& offset = bottom->lod()[0]; const auto* bottom_data_ori = bottom->data(); - auto* buff = ctx.Output<LoDTensor>("X_Temp_Out"); + auto* buff = ctx.Output<phi::DenseTensor>("X_Temp_Out"); buff->Resize(phi::make_ddim({bottom->dims()[0], bottom->dims()[1]})); float* bottom_data = buff->mutable_data(ctx.GetPlace()); for (int i = 0; i < bottom->dims()[0]; i++) { @@ -512,10 +511,10 @@ class CPUPyramidHashOPGradKernel : public framework::OpKernel { } void Compute(const framework::ExecutionContext& ctx) const override { - auto* bottom = ctx.Input<LoDTensor>("X"); + auto* bottom = ctx.Input<phi::DenseTensor>("X"); auto* _blobs = ctx.Input("W"); - auto* drop_pos = ctx.Input<LoDTensor>("DropPos"); - auto* top = ctx.Input<LoDTensor>(framework::GradVarName("Out")); + auto* drop_pos = ctx.Input<phi::DenseTensor>("DropPos"); + auto* top = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out")); int _num_emb = ctx.Attr("num_emb"); float _lr = ctx.Attr("lr"); @@ -523,7 +522,7 @@ class CPUPyramidHashOPGradKernel : public framework::OpKernel { int _space_len = ctx.Attr("space_len"); int _pyramid_layer = ctx.Attr("pyramid_layer"); - auto* buff = ctx.Input<LoDTensor>("X_Temp_Out"); + auto* buff = ctx.Input<phi::DenseTensor>("X_Temp_Out"); auto* bottom_data = buff->data(); int _slot_len = bottom->dims()[0]; diff --git a/paddle/fluid/operators/random_routing_op.cu b/paddle/fluid/operators/random_routing_op.cu index 1fdb1bf73a3047e62eb105bbe6cb9d9b187c24ca..f7f111299c73d5542ba767a346dd613d59a8d312 100644 --- a/paddle/fluid/operators/random_routing_op.cu +++ b/paddle/fluid/operators/random_routing_op.cu @@ -29,7 +29,6 @@ static inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } -using LoDTensor = phi::DenseTensor; using Tensor = phi::DenseTensor; template @@ -54,10 +53,10 @@ template class RandomRoutingOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto topk_idx = context.Input<LoDTensor>("TopK_Idx"); - auto topk_value = context.Input<LoDTensor>("TopK_Value"); - auto prob = context.Input<LoDTensor>("Prob"); - auto out = context.Output<LoDTensor>("Out"); + auto topk_idx = context.Input<phi::DenseTensor>("TopK_Idx"); + auto topk_value = context.Input<phi::DenseTensor>("TopK_Value"); + auto prob = context.Input<phi::DenseTensor>("Prob"); + auto out = 
context.Output("Out"); auto place = context.GetPlace(); const auto& dev_ctx = context.template device_context(); diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index 4b2ed12b1cf8fd189aba5163d3152b7020e7aa4e..40c553f41cc678ae08a99a3a4c8c8e28fe0d097b 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -153,12 +153,13 @@ int64_t RecurrentBase::GetSequenceLength(const framework::Scope &scope) const { PADDLE_ENFORCE_NOT_NULL(var, platform::errors::InvalidArgument( "RecurrentOp finds var %s is NULL", iname)); - PADDLE_ENFORCE_EQ(var->IsType(), - true, - platform::errors::InvalidArgument( - "RecurrentOp only accepts LoDTensor as input but " - "input var %s is not LoDTensor", - iname)); + PADDLE_ENFORCE_EQ( + var->IsType(), + true, + platform::errors::InvalidArgument( + "RecurrentOp only accepts phi::DenseTensor as input but " + "input var %s is not phi::DenseTensor", + iname)); auto &dim = var->Get().dims(); if (seq_len == -1) { seq_len = dim[0]; diff --git a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc index dfafbb7c8a2cb5f3f910331fa6d716a200e133c8..dbd424b1fa0e5d6ee23066577d5aee77c3337ba8 100644 --- a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc +++ b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc @@ -37,13 +37,14 @@ class ReorderLoDTensorByRankTableOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", - "(LoDTensor), the input lod tensor to be reordered according to " - "Input(RankTable)."); + AddInput( + "X", + "(phi::DenseTensor), the input lod tensor to be reordered according to " + "Input(RankTable)."); AddInput("RankTable", "(LoDRankTable), the rank table according to which Input(X) is " "reordered."); - AddOutput("Out", "LoDTensor, the reordered lod tensor."); + AddOutput("Out", "phi::DenseTensor, the reordered lod tensor."); AddComment(R"DOC(ReorderLoDTensorByRankTable operator. Input(X) is a batch of sequences. Input(RankTable) stores new orders of the diff --git a/paddle/fluid/operators/reverse_op.cc b/paddle/fluid/operators/reverse_op.cc index 810a73d89d21766dc6fcc006884232bedf9e00d2..93877aa8251cb781830e0429fb41ec57ce3407d6 100644 --- a/paddle/fluid/operators/reverse_op.cc +++ b/paddle/fluid/operators/reverse_op.cc @@ -48,15 +48,15 @@ class ReverseOpVarTypeInference : public framework::VarTypeInference { class ReverseOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", "The LoDTensor to be flipped."); - AddOutput("Out", "The LoDTensor after flipping."); + AddInput("X", "The phi::DenseTensor to be flipped."); + AddOutput("Out", "The phi::DenseTensor after flipping."); AddAttr>( "axis", "The axises that along which order of elements is reversed.") .SupportTensor(); AddComment(R"DOC( Reverse Operator. - Reverse the order of elements in the input LoDTensor along given axises. + Reverse the order of elements in the input phi::DenseTensor along given axises. 
Case 1: Given diff --git a/paddle/fluid/operators/roi_align_op.cc b/paddle/fluid/operators/roi_align_op.cc index 0cb74c50dfc27e30ea4f596412099025336d2038..6a7999c56557f0c0b36a4897e83bf5b85968c9cb 100644 --- a/paddle/fluid/operators/roi_align_op.cc +++ b/paddle/fluid/operators/roi_align_op.cc @@ -21,7 +21,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; class ROIAlignOp : public framework::OperatorWithKernel { public: @@ -73,9 +72,9 @@ class ROIAlignOpMaker : public framework::OpProtoAndCheckerMaker { "H is the height of the feature, and " "W is the width of the feature."); AddInput("ROIs", - "(LoDTensor), " + "(phi::DenseTensor), " "ROIs (Regions of Interest) to pool over. " - "should be a 2-D LoDTensor of shape (num_rois, 4)" + "should be a 2-D phi::DenseTensor of shape (num_rois, 4)" "given as [[x1, y1, x2, y2], ...]. " "(x1, y1) is the top left coordinates, and " "(x2, y2) is the bottom right coordinates."); diff --git a/paddle/fluid/operators/roi_align_op_mlu.cc b/paddle/fluid/operators/roi_align_op_mlu.cc index 15d42db2751e4ffb850d4bfb2589fa2e4cc66715..5bde4dd7b6686e61145afeac25259b3f61b97e72 100644 --- a/paddle/fluid/operators/roi_align_op_mlu.cc +++ b/paddle/fluid/operators/roi_align_op_mlu.cc @@ -20,14 +20,13 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; template class ROIAlignOpMLUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out = ctx.Output("Out"); out->mutable_data(ctx.GetPlace()); out->set_layout(phi::DataLayout::kNHWC); @@ -175,7 +174,7 @@ template class ROIAlignGradOpMLUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out_grad = ctx.Input(framework::GradVarName("Out")); auto* in_grad = ctx.Output(framework::GradVarName("X")); diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index 7bba00fb90aa2c38bb12a3a11d33d3f611467058..b2e8a6ae58883b73df93d2671dd46989cee916f9 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -24,7 +24,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; class ROIPoolOp : public framework::OperatorWithKernel { public: @@ -75,9 +74,9 @@ class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker { "H is the height of the feature, and " "W is the width of the feature."); AddInput("ROIs", - "(LoDTensor), " + "(phi::DenseTensor), " "ROIs (Regions of Interest) to pool over. " - "should be a 2-D LoDTensor of shape (num_rois, 4)" + "should be a 2-D phi::DenseTensor of shape (num_rois, 4)" "given as [[x1, y1, x2, y2], ...]. " "Where batch_id is the id of the data, " "(x1, y1) is the top left coordinates, and " diff --git a/paddle/fluid/operators/row_conv_op.cc b/paddle/fluid/operators/row_conv_op.cc index 2e3e6e70dfda550822750bca0a9e1518e17d4d31..ae8a34ec312f6c639a62577195bf6d7a364967fa 100644 --- a/paddle/fluid/operators/row_conv_op.cc +++ b/paddle/fluid/operators/row_conv_op.cc @@ -23,8 +23,6 @@ limitations under the License. 
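Both ROI operators above describe ROIs as a 2-D tensor of shape (num_rois, 4) holding [x1, y1, x2, y2] boxes. A small sketch of that contract (ValidRois is a hypothetical helper, not the operators' actual InferShape check):

    #include <array>
    #include <cassert>
    #include <vector>

    // A box is valid when its top-left corner precedes its bottom-right one.
    bool ValidRois(const std::vector<std::array<float, 4>>& rois) {
      for (const auto& r : rois)
        if (r[0] > r[2] || r[1] > r[3]) return false;
      return true;
    }

    int main() {
      assert(ValidRois({{0.f, 0.f, 4.f, 4.f}, {1.f, 2.f, 3.f, 5.f}}));
      return 0;
    }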
*/ namespace paddle { namespace operators { -using LoDTensor = phi::DenseTensor; - template @@ -84,7 +82,7 @@ class RowConvOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "the input(X) is a LodTensor or tensor, LodTensor(X) supports " "variable time-length input sequences. The underlying tensor " - "in this LoDTensor is a matrix with shape (T x N), where T " + "in this phi::DenseTensor is a matrix with shape (T x N), where T " "is the total time steps in this mini-batch and N is the input " "data dimension. the shape of Tensor input(X) has shape " "(B x T x N), B is batch size;"); @@ -142,9 +140,9 @@ template class RowConvKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { - auto *x = context.Input("X"); + auto *x = context.Input("X"); auto *filter = context.Input("Filter"); - auto *out = context.Output("Out"); + auto *out = context.Output("Out"); out->mutable_data(context.GetPlace()); @@ -217,10 +215,11 @@ template class RowConvGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { - auto *x = context.Input("X"); + auto *x = context.Input("X"); auto *filter = context.Input("Filter"); - auto *d_out = context.Input(framework::GradVarName("Out")); - auto *dx = context.Output(framework::GradVarName("X")); + auto *d_out = + context.Input(framework::GradVarName("Out")); + auto *dx = context.Output(framework::GradVarName("X")); auto *d_filter = context.Output(framework::GradVarName("Filter")); diff --git a/paddle/fluid/operators/row_conv_op.cu b/paddle/fluid/operators/row_conv_op.cu index 34595180c9d721a6ddea7f0d4f0ba66ca02236a5..81f140b36fce4a6dcff48e790f211e65564a38a0 100644 --- a/paddle/fluid/operators/row_conv_op.cu +++ b/paddle/fluid/operators/row_conv_op.cu @@ -18,8 +18,6 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -using LoDTensor = phi::DenseTensor; - namespace { inline int DivUp(int x, int y) { return (x + y - 1) / y; } @@ -325,9 +323,9 @@ template class RowConvKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { - auto *X = context.Input("X"); + auto *X = context.Input("X"); auto *Filter = context.Input("Filter"); - auto *Out = context.Output("Out"); + auto *Out = context.Output("Out"); const T *in = X->data(); const T *weight = Filter->data(); @@ -379,15 +377,15 @@ template class RowConvGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { - auto *X = context.Input("X"); + auto *X = context.Input("X"); auto *Filter = context.Input("Filter"); - auto *dOut = context.Input(framework::GradVarName("Out")); + auto *dOut = context.Input(framework::GradVarName("Out")); const T *in = X->data(); const T *weights = Filter->data(); const T *dout = dOut->data(); phi::DenseTensor *dX = - context.Output(framework::GradVarName("X")); + context.Output(framework::GradVarName("X")); phi::DenseTensor *dFilter = context.Output(framework::GradVarName("Filter")); int batch_size = 0; diff --git a/paddle/fluid/operators/run_program_op.cc b/paddle/fluid/operators/run_program_op.cc index 64afb3a2b91e96c7568605dba679957d23bdf561..52e35c343063aca53c5571c0c484d48a0c4f7604 100644 --- a/paddle/fluid/operators/run_program_op.cc +++ b/paddle/fluid/operators/run_program_op.cc @@ -65,18 +65,18 @@ class RunProgramOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("X", - "(vector)" + "(vector)" "The input tensors of RunProgram operator, also the feed targets " "of loaded program.") .AsDuplicable(); AddInput("Params", - "(vector)" + "(vector)" "The input parameter of RunProgram operator, also the parameters " "of the loaded program.") .AsDuplicable() .AsDispensable(); AddOutput("Out", - "(vector)" + "(vector)" "The output tensors of RunProgram operator, also the fetch " "targets of the loaded program.") .AsDuplicable(); @@ -87,7 +87,7 @@ class RunProgramOpMaker : public framework::OpProtoAndCheckerMaker { "NOTE: Do not use Scope directly because Scope output is not " "currently supported."); AddOutput("DOut", - "(vector)" + "(vector)" "The output tensors for GRAD Tensors in RunProgram forward " "operator, the forward operator contains GRAD Tensors when it " "computes double grad.") diff --git a/paddle/fluid/operators/run_program_op.h b/paddle/fluid/operators/run_program_op.h index 93c222cfb8f06084f24f3b2b59c21f5fb44f502a..f7d3630c019d84e42c10381911400320a50027ee 100644 --- a/paddle/fluid/operators/run_program_op.h +++ b/paddle/fluid/operators/run_program_op.h @@ -48,25 +48,24 @@ using BlockDesc = framework::BlockDesc; using ProgramDesc = framework::ProgramDesc; using Variable = framework::Variable; -using LoDTensor = phi::DenseTensor; using SelectedRows = phi::SelectedRows; namespace details { -// all input vars should be LoDTensor & is initialized +// all input vars should be phi::DenseTensor & is initialized static void CheckInputVarStatus(const Variable &var, const std::string &var_name) { + PADDLE_ENFORCE_EQ(var.IsType(), + true, + platform::errors::InvalidArgument( + "The input variable %s of " + "RunProgram(Grad)Op holds " + "wrong type. 
Expect type is phi::DenseTensor, but " + "receive type is %s.", + var_name, + platform::demangle(framework::ToTypeName(var.Type())))); PADDLE_ENFORCE_EQ( - var.IsType(), - true, - platform::errors::InvalidArgument( - "The input variable %s of " - "RunProgram(Grad)Op holds " - "wrong type. Expect type is LoDTensor, but receive type is %s.", - var_name, - platform::demangle(framework::ToTypeName(var.Type())))); - PADDLE_ENFORCE_EQ( - var.Get().IsInitialized(), + var.Get().IsInitialized(), true, platform::errors::InvalidArgument("The tensor in input variable %s of " "RunProgram(Grad)Op " @@ -77,17 +76,18 @@ static void CheckInputVarStatus(const Variable &var, static void CheckOutputVarStatus(const Variable &src_var, const Variable &dst_var, const std::string &var_name) { - if (dst_var.IsType()) { + if (dst_var.IsType()) { PADDLE_ENFORCE_EQ( - src_var.IsType(), + src_var.IsType(), true, platform::errors::InvalidArgument( "The output variable %s get from " "RunProgram(Grad)Op's internal scope holds " - "wrong type. Expect type is LoDTensor, but receive type is %s.", + "wrong type. Expect type is phi::DenseTensor, but receive type is " + "%s.", var_name, platform::demangle(framework::ToTypeName(src_var.Type())))); - PADDLE_ENFORCE_EQ(src_var.Get().IsInitialized(), + PADDLE_ENFORCE_EQ(src_var.Get().IsInitialized(), true, platform::errors::InvalidArgument( "The tensor in output variable %s get from " @@ -115,7 +115,7 @@ static void CheckOutputVarStatus(const Variable &src_var, } else { PADDLE_THROW(platform::errors::InvalidArgument( "The RunProgram(Grad)Op only support output " - "variable of type LoDTensor or SelectedRows, " + "variable of type phi::DenseTensor or SelectedRows, " "but received variable %s's type is %s", var_name, platform::demangle(framework::ToTypeName(dst_var.Type())))); @@ -123,12 +123,12 @@ static void CheckOutputVarStatus(const Variable &src_var, } static void VariableShare(const Variable &src_var, Variable *dst_var) { - // The previous check ensures that the variable type can only be LoDTensor or - // SelectedRows. - if (src_var.IsType()) { - auto *lod_tensor = dst_var->GetMutable(); - lod_tensor->ShareDataWith(src_var.Get()); - lod_tensor->set_lod(src_var.Get().lod()); + // The previous check ensures that the variable type can only be + // phi::DenseTensor or SelectedRows. + if (src_var.IsType()) { + auto *lod_tensor = dst_var->GetMutable(); + lod_tensor->ShareDataWith(src_var.Get()); + lod_tensor->set_lod(src_var.Get().lod()); } else if (src_var.IsType()) { auto *selected_rows = dst_var->GetMutable(); selected_rows->mutable_value()->ShareDataWith( diff --git a/paddle/fluid/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc index a25241d368affb2ddc922cefb4ff286cf77e1380..41780561144b1ac3b050d5429a2ea6937bfb7bfe 100644 --- a/paddle/fluid/operators/save_combine_op.cc +++ b/paddle/fluid/operators/save_combine_op.cc @@ -54,7 +54,7 @@ class SaveCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( SaveCombine operator -This operator will serialize and write a list of input LoDTensor variables +This operator will serialize and write a list of input phi::DenseTensor variables to a file on disk. )DOC"); AddAttr("overwrite", @@ -70,7 +70,7 @@ to a file on disk. 
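The RunProgram checks above are representative of every IsType/Get call site this patch re-templates: a Variable holds one of a few concrete types and the template argument picks the alternative. A toy model with std::variant (Paddle's real Variable is not variant-based; the analogy only shows why swapping the template argument is mechanical):

    #include <stdexcept>
    #include <variant>

    namespace phi {
    struct DenseTensor {};
    struct SelectedRows {};
    }  // namespace phi

    struct Variable {
      std::variant<phi::DenseTensor, phi::SelectedRows> holder;
      template <typename T> bool IsType() const {
        return std::holds_alternative<T>(holder);
      }
      template <typename T> const T& Get() const { return std::get<T>(holder); }
    };

    int main() {
      Variable var;
      var.holder = phi::DenseTensor{};
      if (!var.IsType<phi::DenseTensor>())
        throw std::invalid_argument("input variable holds the wrong type");
      (void)var.Get<phi::DenseTensor>();  // same object, new spelling
      return 0;
    }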
AddAttr( "file_path", "(string)" - "The \"file_path\" where the LoDTensor variables will be saved.") + "The \"file_path\" where the phi::DenseTensor variables will be saved.") .AddCustomChecker( [](const std::string& path) { return !path.empty(); }); AddAttr("save_to_memory", diff --git a/paddle/fluid/operators/save_combine_op.h b/paddle/fluid/operators/save_combine_op.h index 20baefe5974281f1ff36ba36de611b52f1fe71e8..fd54202a75d3fab51076bed652f36b0c18d547ac 100644 --- a/paddle/fluid/operators/save_combine_op.h +++ b/paddle/fluid/operators/save_combine_op.h @@ -72,13 +72,14 @@ class SaveCombineOpKernel : public framework::OpKernel { inp_vars[i], platform::errors::InvalidArgument("Cannot find variable %s to save.", inp_var_names[i])); - PADDLE_ENFORCE_EQ(inp_vars[i]->IsType() || - inp_vars[i]->IsType(), - true, - platform::errors::InvalidArgument( - "SaveCombine operator only supports saving " - "LoDTensor or Vocab variable, %s has wrong type.", - inp_var_names[i])); + PADDLE_ENFORCE_EQ( + inp_vars[i]->IsType() || + inp_vars[i]->IsType(), + true, + platform::errors::InvalidArgument( + "SaveCombine operator only supports saving " + "phi::DenseTensor or Vocab variable, %s has wrong type.", + inp_var_names[i])); if (inp_vars[i]->IsType()) { auto &tensor = inp_vars[i]->Get(); diff --git a/paddle/fluid/operators/save_op.cc b/paddle/fluid/operators/save_op.cc index 586482ff7d77268945902b3bdcbbf288934139e3..179a18ba8d7ceed6c33e5b3da66486a58f01bce6 100644 --- a/paddle/fluid/operators/save_op.cc +++ b/paddle/fluid/operators/save_op.cc @@ -40,11 +40,12 @@ class SaveOp : public framework::OperatorWithKernel { class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", "(Tensor ) Input LoDTensor and SelectedRows to be saved"); + AddInput("X", + "(Tensor ) Input phi::DenseTensor and SelectedRows to be saved"); AddComment(R"DOC( Save operator -This operator will serialize and write LoDTensor / SelectedRows variable to file on disk. +This operator will serialize and write phi::DenseTensor / SelectedRows variable to file on disk. 
)DOC"); AddAttr("overwrite", "(boolean, default true)" diff --git a/paddle/fluid/operators/save_op.h b/paddle/fluid/operators/save_op.h index ddb84af69d14a695aa6e571989f81865ed7711fd..7b78ac1ecea876c645a52018f4420067888e87c4 100644 --- a/paddle/fluid/operators/save_op.h +++ b/paddle/fluid/operators/save_op.h @@ -64,7 +64,8 @@ class SaveOpKernel : public framework::OpKernel { SaveSelectedRows(ctx, place, input_var, filename); } else { PADDLE_THROW(platform::errors::InvalidArgument( - "Save operator only supports saving LoDTensor and SelectedRows " + "Save operator only supports saving phi::DenseTensor and " + "SelectedRows " "variable, %s has wrong type", iname)); } diff --git a/paddle/fluid/operators/search_compute.h b/paddle/fluid/operators/search_compute.h index bbbcce34c4a54bcf274702039bd8fc1a75807342..34728c86c56b6e6b936ae845fe496a5ba19dbe48 100644 --- a/paddle/fluid/operators/search_compute.h +++ b/paddle/fluid/operators/search_compute.h @@ -29,7 +29,6 @@ namespace paddle { namespace operators { using Tensor = phi::DenseTensor; -using LoDTensor = phi::DenseTensor; using LoD = framework::LoD; template diff --git a/paddle/fluid/operators/select_input_op.cc b/paddle/fluid/operators/select_input_op.cc index 5af8ba45c496b26c7ed90c8591ec2fef5a47488a..3b00aab8c8e894217d2d46c83422e81b4f295256 100644 --- a/paddle/fluid/operators/select_input_op.cc +++ b/paddle/fluid/operators/select_input_op.cc @@ -73,7 +73,7 @@ class SelectInputOpProtoMaker : public framework::OpProtoAndCheckerMaker { // Because this op is blocking whole control flow. I am implementing MVP // (minimal viable product) here. AddComment(R"DOC( -Merge branches of LoDTensor into a single Output with a mask integer +Merge branches of phi::DenseTensor into a single Output with a mask integer specifying the output branchi. )DOC"); } diff --git a/paddle/fluid/operators/select_output_op.cc b/paddle/fluid/operators/select_output_op.cc index bfc0d4a4c1f18f9edf820bfd78e4f0e6841a385c..f57933bab0c0b8b18334bc6e9971e663275c3a3b 100644 --- a/paddle/fluid/operators/select_output_op.cc +++ b/paddle/fluid/operators/select_output_op.cc @@ -71,7 +71,9 @@ class SelectOutputOp : public framework::OperatorBase { class SelectOutputOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", "The input LoDTensor or LoDTensorArray or SelectedRows."); + AddInput( + "X", + "The input phi::DenseTensor or phi::DenseTensorArray or SelectedRows."); AddInput("Mask", "Tensor with numel 1 specifying which branch to output"); AddOutput("Out", "The output can contains multiple variables. The output of " diff --git a/paddle/fluid/operators/shape_op.cc b/paddle/fluid/operators/shape_op.cc index 3a243f24ff8cf3c1da5246f53df029e7107a1f33..6849b4e42721e222b37f4894eee15325c73c7ebe 100644 --- a/paddle/fluid/operators/shape_op.cc +++ b/paddle/fluid/operators/shape_op.cc @@ -47,11 +47,11 @@ class ShapeOp : public framework::OperatorWithKernel { class ShapeOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("Input", "(LoDTensor), The input tensor."); - AddOutput( - "Out", - "(LoDTensor), The shape of input tensor, the data type of the shape" - " is int32_t, will be on the same device with the input Tensor."); + AddInput("Input", "(phi::DenseTensor), The input tensor."); + AddOutput("Out", + "(phi::DenseTensor), The shape of input tensor, the data type of " + "the shape" + " is int32_t, will be on the same device with the input Tensor."); AddComment(R"DOC( Shape Operator. 
diff --git a/paddle/fluid/operators/shape_op_mlu.cc b/paddle/fluid/operators/shape_op_mlu.cc
index 2863367e97e94152f82869733e2fd077404766c5..bd51b49851840b7054c6257a17115bd430101e77 100644
--- a/paddle/fluid/operators/shape_op_mlu.cc
+++ b/paddle/fluid/operators/shape_op_mlu.cc
@@ -21,7 +21,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using SelectedRows = phi::SelectedRows;
 
 template <typename T>
@@ -33,7 +32,7 @@ class ShapeMLUKernel : public framework::OpKernel<T> {
     if (in_var->IsType<phi::SelectedRows>()) {
       in_dims = in_var->Get<phi::SelectedRows>().value().dims();
     } else {
-      in_dims = in_var->Get<LoDTensor>().dims();
+      in_dims = in_var->Get<phi::DenseTensor>().dims();
     }
     auto* out_t = ctx.Output<phi::DenseTensor>("Out");
     out_t->Resize({in_dims.size()});
diff --git a/paddle/fluid/operators/shard_index_op.cc b/paddle/fluid/operators/shard_index_op.cc
index e601a50409936b6ac97da251bf132692a59fddaa..4c22efc2af2993164f25857f841a62ff4345c924 100644
--- a/paddle/fluid/operators/shard_index_op.cc
+++ b/paddle/fluid/operators/shard_index_op.cc
@@ -37,7 +37,8 @@ class ShardIndexOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X",
-             "(LoDTensor, LoDTensor) Input variable. Each value "
+             "(phi::DenseTensor, phi::DenseTensor) Input variable. "
+             "Each value "
              "of X is an index.");
     AddOutput(
         "Out",
diff --git a/paddle/fluid/operators/shard_index_op_npu.cc b/paddle/fluid/operators/shard_index_op_npu.cc
index 53a352b7fc76ad256cf1f52ee5a1a447d5012d66..3cc025ca9ed64cdcbc6516ed06387ebe2bfe85a3 100644
--- a/paddle/fluid/operators/shard_index_op_npu.cc
+++ b/paddle/fluid/operators/shard_index_op_npu.cc
@@ -18,15 +18,14 @@
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
 using Tensor = phi::DenseTensor;
 
 template <typename T>
 class ShardIndexNPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     VLOG(4) << "start kernel";
-    auto* in = context.Input<LoDTensor>("X");
-    auto* out = context.Output<LoDTensor>("Out");
+    auto* in = context.Input<phi::DenseTensor>("X");
+    auto* out = context.Output<phi::DenseTensor>("Out");
     int index_num = context.Attr<int>("index_num");
     int nshards = context.Attr<int>("nshards");
     int shard_id = context.Attr<int>("shard_id");
diff --git a/paddle/fluid/operators/share_data_op.cc b/paddle/fluid/operators/share_data_op.cc
index 7f903ba3440b5ade1cad537b9fbc2d6e09094a02..69e16bb7ac9c9c4b3fa344ac6185b0a0b393a671 100644
--- a/paddle/fluid/operators/share_data_op.cc
+++ b/paddle/fluid/operators/share_data_op.cc
@@ -34,7 +34,7 @@ class ShareDataOp : public framework::OperatorWithKernel {
             in_type == framework::proto::VarType::SELECTED_ROWS,
         true,
         platform::errors::InvalidArgument(
-            "Type of Variable[X] must be LoDTensor or SelectedRows!"));
+            "Type of Variable[X] must be phi::DenseTensor or SelectedRows!"));
     PADDLE_ENFORCE_EQ(
         in_type,
         out_type,
diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc
index ad932f8ce22b577966ad80708b8fd5d27721f8f8..077e51c707cfa18215c3e405465181e967658c4e 100644
--- a/paddle/fluid/operators/shrink_rnn_memory_op.cc
+++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc
@@ -92,12 +92,13 @@ class ShrinkRNNMemoryOp : public ArrayOp {
 class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "(LoDTensor) The RNN step memory to be shrank.");
+    AddInput("X", "(phi::DenseTensor) The RNN step memory to be shrunk.");
     AddInput("RankTable", "(LoDRankTable) The lod_rank_table of dynamic RNN.");
-    AddInput("I",
-             "(LoDTensor) The step index. The RNN step memory 'X' will be "
-             "shrank to match the size of the input of the index'th step.");
-    AddOutput("Out", "(LoDTensor) The shrank RNN step memory.");
+    AddInput(
+        "I",
+        "(phi::DenseTensor) The step index. The RNN step memory 'X' will be "
+        "shrunk to match the size of the input of the index'th step.");
+    AddOutput("Out", "(phi::DenseTensor) The shrunk RNN step memory.");
     AddComment(R"DOC(
 This operator is used to shrink output batch of memory defined in dynamic RNN.
diff --git a/paddle/fluid/operators/shuffle_batch_op.cc b/paddle/fluid/operators/shuffle_batch_op.cc
index 2fe8512b4b1550a07f74182950352753c6f92444..6eeec761120b04f46d0d2d0322e8f948506c4e54 100644
--- a/paddle/fluid/operators/shuffle_batch_op.cc
+++ b/paddle/fluid/operators/shuffle_batch_op.cc
@@ -76,17 +76,18 @@ class ShuffleBatchOp : public framework::OperatorWithKernel {
 class ShuffleBatchOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "(LoDTensor) The input tensor of shuffle_batch op.");
-    AddInput("Seed", "(LoDTensor) The input seed tensor.");
+    AddInput("X", "(phi::DenseTensor) The input tensor of shuffle_batch op.");
+    AddInput("Seed", "(phi::DenseTensor) The input seed tensor.");
     AddAttr<int>(
         "startup_seed",
         "If input tensor 'Seed' is not initialized, the 'startup_seed' "
         "will be used to replace it. The seed after shuffle batch will "
         "be saved in 'SeedOut'. ")
         .SetDefault(0);
-    AddOutput("Out", "(LoDTensor) The output tensor of shuffle_batch op.");
+    AddOutput("Out",
+              "(phi::DenseTensor) The output tensor of shuffle_batch op.");
     AddOutput("ShuffleIdx", "(Tensor) Record forword shuffle order");
-    AddOutput("SeedOut", "(LoDTensor) Saved new generated seed.");
+    AddOutput("SeedOut", "(phi::DenseTensor) Saved new generated seed.");
     AddComment(R"DOC(
 Shuffle Batch Operator.
diff --git a/paddle/fluid/operators/shuffle_batch_op.h b/paddle/fluid/operators/shuffle_batch_op.h
index c445648f7569e32556f665ec0df210866d01ab8b..2f1fbee16e3d9ae036aa66000557d7874a22e2f0 100644
--- a/paddle/fluid/operators/shuffle_batch_op.h
+++ b/paddle/fluid/operators/shuffle_batch_op.h
@@ -33,7 +33,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 
 template <typename T>
 using Vector = framework::Vector<T>;
@@ -42,11 +41,11 @@ template <typename T>
 class ShuffleBatchKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &context) const override {
-    auto *x = context.Input<LoDTensor>("X");
-    auto *seed = context.Input<LoDTensor>("Seed");
-    auto *out = context.Output<LoDTensor>("Out");
-    auto *shuffleidx = context.Output<LoDTensor>("ShuffleIdx");
-    auto *seed_out = context.Output<LoDTensor>("SeedOut");
+    auto *x = context.Input<phi::DenseTensor>("X");
+    auto *seed = context.Input<phi::DenseTensor>("Seed");
+    auto *out = context.Output<phi::DenseTensor>("Out");
+    auto *shuffleidx = context.Output<phi::DenseTensor>("ShuffleIdx");
+    auto *seed_out = context.Output<phi::DenseTensor>("SeedOut");
 
     auto x_embed_size = x->dims()[x->dims().size() - 1];
     auto elem_size = 1;
@@ -128,9 +127,11 @@ template <typename T>
 class ShuffleBatchGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &context) const override {
-    auto *out_grad = context.Input<LoDTensor>(framework::GradVarName("Out"));
-    auto *shuffleidx = context.Input<LoDTensor>("ShuffleIdx");
-    auto *x_grad = context.Output<LoDTensor>(framework::GradVarName("X"));
+    auto *out_grad =
+        context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
+    auto *shuffleidx = context.Input<phi::DenseTensor>("ShuffleIdx");
+    auto *x_grad =
+        context.Output<phi::DenseTensor>(framework::GradVarName("X"));
 
     auto embed_size = out_grad->dims()[out_grad->dims().size() - 1];
     auto elem_size = 1;
diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc
index 07867f5070b3c2bfca4f75b9110c13b011266174..d6f48d334759d90012f3046c7eff40b32beaec16 100644
--- a/paddle/fluid/operators/slice_op.cc
+++ b/paddle/fluid/operators/slice_op.cc
@@ -203,9 +203,9 @@ class SliceOpVarTypeInference : public framework::VarTypeInference {
     auto not_decrease =
         paddle::get<std::vector<int>>(decrease_axis).size() == 0;
     if (not_decrease) {
-      // The default type of out is LoDTensor.
+      // The default type of out is phi::DenseTensor.
       // However, if no axis is decreased and the type of input is not
-      // LoDTensor, the type of out should be the same as input.
+      // phi::DenseTensor, the type of out should be the same as input.
       // For example, input is a LoDTensorArray and no axis is decreased, the
       // output should be a LoDTensorArray.
       ctx->SetOutputType(out_name, ctx->GetInputType(x_name));
@@ -369,8 +369,8 @@ class SliceOpGradVarTypeInference : public framework::VarTypeInference {
     auto d_out = framework::GradVarName("Out");
     auto out = framework::GradVarName("Input");
     // The types of grad_input and input should always be the same.
-    // The default type of out is LoDTensor, but the type of input can be
-    // LoDTensor or LoDTensorArray,
+    // The default type of out is phi::DenseTensor, but the type of input can be
+    // phi::DenseTensor or phi::DenseTensorArray,
     // so set the type of both to be the same.
     ctx->SetOutputType(out, ctx->GetInputType(x));
     ctx->SetOutputDataType(out, ctx->GetInputDataType(d_out));
diff --git a/paddle/fluid/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc
index 2a64319d9866536e40f07e4a44f3be37d5429120..e648575a1edca14e5c0825363a8430f88bc26ce8 100644
--- a/paddle/fluid/operators/split_lod_tensor_op.cc
+++ b/paddle/fluid/operators/split_lod_tensor_op.cc
@@ -145,20 +145,20 @@ class SplitLoDTensorOp : public framework::OperatorBase {
 class SplitLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "The input LoDTensor");
+    AddInput("X", "The input phi::DenseTensor");
     AddInput("Mask", "A bool column vector which mask the input");
-    AddOutput("OutTrue", "True branch of input LoDTensor");
-    AddOutput("OutFalse", "False branch of input LoDTensor");
+    AddOutput("OutTrue", "True branch of input phi::DenseTensor");
+    AddOutput("OutFalse", "False branch of input phi::DenseTensor");
     AddAttr<int>("level", "(int) the specific lod level to split.")
         .SetDefault(0)
         .EqualGreaterThan(0);
     AddComment(
         R"DOC(
-        Split a LoDTensor with a Mask at certain level. The input LoDTensor
+        Split a phi::DenseTensor with a Mask at certain level. The input phi::DenseTensor
         has 3 sequence at certain lod level. The Mask is a bool column vector,
         such as [0, 1, 0] at the same level. The first and third sequence will
-        be send to False Output LoDTensor; whereas the second sequence will
-        be send to True Output LoDTensor. Please refer to MergeLoDTensorOp.)DOC");
+        be sent to False Output phi::DenseTensor; whereas the second sequence will
+        be sent to True Output phi::DenseTensor. Please refer to MergeLoDTensorOp.)DOC");
   }
 };
diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc
index 0c2d79a664ea82dcd36ee0637f302048b9ed62f3..fc7e8a869e3ef82e22b00650881fcfd4f2d9eff3 100644
--- a/paddle/fluid/operators/split_op.cc
+++ b/paddle/fluid/operators/split_op.cc
@@ -21,7 +21,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
 using framework::Variable;
 
@@ -77,11 +76,11 @@ class SplitOp : public framework::OperatorWithKernel {
       const paddle::small_vector<framework::InferShapeVarPtr,
                                  phi::kInputSmallVectorSize>
          &sections_varptr_list = ctx->GetInputVarPtrs("SectionsTensorList");
-      std::vector<LoDTensor> sections_from_tensor;
+      std::vector<phi::DenseTensor> sections_from_tensor;
       sections_from_tensor.reserve(sections_tensor_list_size);
       for (const auto &section_varptr : sections_varptr_list) {
         Variable *var = PADDLE_GET_CONST(Variable *, section_varptr);
-        sections_from_tensor.emplace_back(var->Get<LoDTensor>());
+        sections_from_tensor.emplace_back(var->Get<phi::DenseTensor>());
       }
       sections_final = std::move(phi::IntArray(sections_from_tensor));
     } else if (!ctx->IsRuntime() && ctx->HasInputs("SectionsTensorList")) {
diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc
index a4ec6b6cf6d50070e4767f37741d7eb34e3fed22..098167cb69d7a85b6f49bdc041ae4fb040e7f364 100644
--- a/paddle/fluid/operators/sum_op.cc
+++ b/paddle/fluid/operators/sum_op.cc
@@ -128,11 +128,12 @@ class SumOp : public framework::OperatorWithKernel {
 class SumOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X",
-             "A Varaible list. The shape and data type of the list elements"
-             "should be consistent. Variable can be multi-dimensional Tensor"
-             "or LoDTensor, and data types can be: float32, float64, int32, "
-             "int64.")
+    AddInput(
+        "X",
+        "A Variable list. The shape and data type of the list elements "
+        "should be consistent. Variable can be multi-dimensional Tensor "
+        "or phi::DenseTensor, and data types can be: float32, float64, int32, "
+        "int64.")
         .AsDuplicable();
     AddOutput("Out",
               "the sum of input :code:`x`. its shape and data types are "
@@ -145,8 +146,9 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
               "(string, default \"float32\"). Data type of mkldnn kernel")
         .SetDefault("float32")
         .InEnum({"float32", "bfloat16"});
-    AddComment(R"DOC(This OP is used to sum one or more Tensor or LoDTensor
-    of the input. If the input is LoDTensor, the output only
+    AddComment(
+        R"DOC(This OP is used to sum one or more Tensor or phi::DenseTensor
+    of the input. If the input is phi::DenseTensor, the output only
     shares LoD information with the first input.)DOC");
   }
 };
diff --git a/paddle/fluid/operators/sum_op_mlu.cc b/paddle/fluid/operators/sum_op_mlu.cc
index af1cb524631b3e6f80d5c98344cc80ec5e60f4eb..aad62e9ce2c333e3100b253ac07d2c1484a4d1e5 100644
--- a/paddle/fluid/operators/sum_op_mlu.cc
+++ b/paddle/fluid/operators/sum_op_mlu.cc
@@ -21,7 +21,6 @@ namespace operators {
 
 using Tensor = phi::DenseTensor;
 using SelectedRows = phi::SelectedRows;
-using LoDTensor = phi::DenseTensor;
 
 template <typename T>
 class SumMLUKernel : public framework::OpKernel<T> {
diff --git a/paddle/fluid/operators/sum_op_npu.cc b/paddle/fluid/operators/sum_op_npu.cc
index b6c4ddb73f20b5348baabb44a38c46bc064a3026..20cc7ec18b8b78d896d612d0013f8b3b1b83e2c9 100644
--- a/paddle/fluid/operators/sum_op_npu.cc
+++ b/paddle/fluid/operators/sum_op_npu.cc
@@ -25,7 +25,6 @@ namespace operators {
 
 using Tensor = phi::DenseTensor;
 using SelectedRows = phi::SelectedRows;
-using LoDTensor = phi::DenseTensor;
 
 template <typename T>
 class SumNPUKernel : public framework::OpKernel<T> {
diff --git a/paddle/fluid/operators/svd_helper.h b/paddle/fluid/operators/svd_helper.h
index a12b70bbdae3ac12d0351bc5ba81c6ef1762f8bb..d6c306ff2a9f3e9449e79e707e56f8598bedaaed 100644
--- a/paddle/fluid/operators/svd_helper.h
+++ b/paddle/fluid/operators/svd_helper.h
@@ -744,7 +744,7 @@ struct DeviceIndependenceTensorOperations {
                                  const framework::AttributeMap& attrs,
                                  std::vector<int> out_shape,
                                  NameOutTensor out_str = {"Out"}) {
-    // varialble set dims must be LoDTensor / SelectedRowTensor
+    // variable set dims must be phi::DenseTensor / SelectedRowTensor
     framework::Scope& local_scope = context.scope().NewScope();
     framework::VariableNameMap op_outputs;
     for (auto out_name : out_str) {
diff --git a/paddle/fluid/operators/tdm_child_op.h b/paddle/fluid/operators/tdm_child_op.h
index af417b169978e2184c4ad1dba922473a5c8ca2ef..3f781ab65eeb802fc5afeb0f3c30c5944e91c500 100644
--- a/paddle/fluid/operators/tdm_child_op.h
+++ b/paddle/fluid/operators/tdm_child_op.h
@@ -29,16 +29,15 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using DDim = framework::DDim;
 using LoD = framework::LoD;
 
 template <typename T, typename InfoT = int, typename OutT = int>
 void TDMChildInner(const framework::ExecutionContext &context,
-                   const LoDTensor &input,
-                   const LoDTensor &tree_info,
-                   LoDTensor *child,
-                   LoDTensor *mask) {
+                   const phi::DenseTensor &input,
+                   const phi::DenseTensor &tree_info,
+                   phi::DenseTensor *child,
+                   phi::DenseTensor *mask) {
   auto child_nums = context.Attr<int>("child_nums");
   auto info_dims = tree_info.dims();
   int node_nums = info_dims[0];
@@ -114,7 +113,7 @@ class TDMChildKernel : public framework::OpKernel<T> {
     auto *input_var = ctx.InputVar("X");
     auto *tree_info_var = ctx.InputVar("TreeInfo");
 
-    auto &input_tensor = input_var->Get<LoDTensor>();
+    auto &input_tensor = input_var->Get<phi::DenseTensor>();
     const auto &input_type =
         framework::TransToProtoVarType(input_tensor.dtype());
     bool input_type_match = input_type == framework::proto::VarType::INT32 ||
@@ -130,7 +129,7 @@
                           paddle::framework::DataTypeToString(
                               framework::proto::VarType::INT64)));
 
-    auto &tree_info_tensor = tree_info_var->Get<LoDTensor>();
+    auto &tree_info_tensor = tree_info_var->Get<phi::DenseTensor>();
     const auto &info_type =
         framework::TransToProtoVarType(tree_info_tensor.dtype());
     bool info_type_match = info_type == framework::proto::VarType::INT32 ||
diff --git a/paddle/fluid/operators/tdm_sampler_op.h b/paddle/fluid/operators/tdm_sampler_op.h
index af70476a4e536c97fff4071c30cd15a19f385b25..d98680c574154a451ffb9cccdc78d06eaacf6be7 100644
--- a/paddle/fluid/operators/tdm_sampler_op.h
+++ b/paddle/fluid/operators/tdm_sampler_op.h
@@ -33,17 +33,16 @@ using Tensor = phi::DenseTensor;
 using Sampler = math::Sampler;
 using DDim = framework::DDim;
 using LoD = framework::LoD;
-using LoDTensor = phi::DenseTensor;
 using LoDAndOffset = std::pair<size_t, std::vector<size_t>>;
 
 template <typename T, typename TreeT = int, typename OutT = int>
 void TDMSamplerInner(const framework::ExecutionContext &context,
-                     const LoDTensor &input_tensor,
-                     const LoDTensor &travel_lod_tensor,
-                     const LoDTensor &layer_lod_tensor,
-                     LoDTensor *out_tensor,
-                     LoDTensor *label_tensor,
-                     LoDTensor *mask_tensor) {
+                     const phi::DenseTensor &input_tensor,
+                     const phi::DenseTensor &travel_lod_tensor,
+                     const phi::DenseTensor &layer_lod_tensor,
+                     phi::DenseTensor *out_tensor,
+                     phi::DenseTensor *label_tensor,
+                     phi::DenseTensor *mask_tensor) {
   auto neg_samples_num_vec =
       context.Attr<std::vector<int>>("neg_samples_num_list");
   auto layer_offset_lod = context.Attr<std::vector<int>>("layer_offset_lod");
diff --git a/paddle/fluid/operators/transfer_layout_op.cc b/paddle/fluid/operators/transfer_layout_op.cc
index 84f1948cd64a1e8ebc830743995d0ea44a9aa896..5bba1c225a58822d76add9166a494b64d8f11670 100644
--- a/paddle/fluid/operators/transfer_layout_op.cc
+++ b/paddle/fluid/operators/transfer_layout_op.cc
@@ -94,8 +94,9 @@ class TransferLayoutKernel {
 class TransferLayoutOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "(LoDTensor) The input Tensor");
-    AddOutput("Out", "(LoDTensor) The Output Tensor with desired layout");
+    AddInput("X", "(phi::DenseTensor) The input Tensor");
+    AddOutput("Out",
+              "(phi::DenseTensor) The Output Tensor with desired layout");
     // NOTE(zhiqiu): in most case, the src_layout is not needed, the op can use
     // the layout
     // of input X. However, in some mkldnn kernel, the src layout computed by
diff --git a/paddle/fluid/operators/var_conv_2d_op.cc b/paddle/fluid/operators/var_conv_2d_op.cc
index 2a795a21d34773690973aeee0c1758b11467e369..35118ae64876c63e4d25d7d1124335463149961a 100644
--- a/paddle/fluid/operators/var_conv_2d_op.cc
+++ b/paddle/fluid/operators/var_conv_2d_op.cc
@@ -25,16 +25,17 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using LoD = framework::LoD;
 
 void VarConv2dOpMaker::Make() {
   AddInput("X",
-           "X (LoDTensor, default LoDTensor) Input variable which "
+           "X (phi::DenseTensor, default phi::DenseTensor) Input "
+           "variable which "
            "should contain lod information.");
-  AddInput("ROW", "(LoDTensor) the row variable provides lod information");
+  AddInput("ROW",
+           "(phi::DenseTensor) the row variable provides lod information");
   AddInput("COLUMN",
-           "(LoDTensor) the column variable provides lod information");
+           "(phi::DenseTensor) the column variable provides lod information");
   AddInput("W", "W (Tensor), the filter.");
   AddAttr<int>("InputChannel", "the input filter num").SetDefault(1);
   AddAttr<int>("OutputChannel", "the output filter num").SetDefault(1);
@@ -43,9 +44,12 @@ void VarConv2dOpMaker::Make() {
   AddAttr<int>("KernelH", "the height of Kernel").SetDefault(1);
   AddAttr<int>("KernelW", "the width of Kernel").SetDefault(1);
 
-  AddOutput("Out", "(LoDTensor, default LoDTensor) Output variable");
+  AddOutput(
+      "Out",
+      "(phi::DenseTensor, default phi::DenseTensor) Output variable");
   AddOutput("Col",
-            "(LoDTensor, default LoDTensor) the intermediate result "
+            "(phi::DenseTensor, default phi::DenseTensor) the "
+            "intermediate result "
             "variable");
 
   AddComment(R"DOC(
@@ -125,7 +129,7 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
   if (ctx->IsRuntime()) {
     framework::Variable* x_var =
         PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]);
-    const auto& x_lod = x_var->Get<LoDTensor>().lod();
+    const auto& x_lod = x_var->Get<phi::DenseTensor>().lod();
     PADDLE_ENFORCE_EQ(
         !x_lod.empty(),
         true,
@@ -146,7 +150,7 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
     framework::Variable* row_var =
         PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("ROW")[0]);
-    const auto& row_lod = row_var->Get<LoDTensor>().lod();
+    const auto& row_lod = row_var->Get<phi::DenseTensor>().lod();
     PADDLE_ENFORCE_EQ(!row_lod.empty(),
                       true,
                       platform::errors::InvalidArgument(
@@ -155,7 +159,7 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const {
     framework::Variable* col_var =
         PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("COLUMN")[0]);
-    const auto& col_lod = col_var->Get<LoDTensor>().lod();
+    const auto& col_lod = col_var->Get<phi::DenseTensor>().lod();
     PADDLE_ENFORCE_EQ(!col_lod.empty(),
                       true,
                       platform::errors::InvalidArgument(
@@ -175,11 +179,11 @@ template <typename T>
 class CPUVarConv2dOPKernel : public framework::OpKernel<T> {
  public:
   void Im2Col(const framework::ExecutionContext& ctx,
-              const LoDTensor& input,
-              LoDTensor* col) const {
+              const phi::DenseTensor& input,
+              phi::DenseTensor* col) const {
     int input_channel = ctx.Attr<int>("InputChannel");
-    auto* in_row = ctx.Input<LoDTensor>("ROW");
-    auto* in_col = ctx.Input<LoDTensor>("COLUMN");
+    auto* in_row = ctx.Input<phi::DenseTensor>("ROW");
+    auto* in_col = ctx.Input<phi::DenseTensor>("COLUMN");
     int kernel_h = ctx.Attr<int>("KernelH");
     int kernel_w = ctx.Attr<int>("KernelW");
     int stride_h = ctx.Attr<int>("StrideH");
@@ -267,12 +271,12 @@ class CPUVarConv2dOPKernel : public framework::OpKernel<T> {
   }
 
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* bottom = ctx.Input<LoDTensor>("X");
-    auto* in_row = ctx.Input<LoDTensor>("ROW");
-    auto* in_col = ctx.Input<LoDTensor>("COLUMN");
+    auto* bottom = ctx.Input<phi::DenseTensor>("X");
+    auto* in_row = ctx.Input<phi::DenseTensor>("ROW");
+    auto* in_col = ctx.Input<phi::DenseTensor>("COLUMN");
     auto* w = ctx.Input<Tensor>("W");
-    auto* top = ctx.Output<LoDTensor>("Out");
-    auto* col = ctx.Output<LoDTensor>("Col");
+    auto* top = ctx.Output<phi::DenseTensor>("Out");
+    auto* col = ctx.Output<phi::DenseTensor>("Col");
 
     int output_channel = ctx.Attr<int>("OutputChannel");
     int input_channel = ctx.Attr<int>("InputChannel");
@@ -390,10 +394,10 @@ template <typename T>
 class CPUVarConv2dOPGradKernel : public framework::OpKernel<T> {
  public:
   void Im2ColGrad(const framework::ExecutionContext& ctx, T* top_diff) const {
-    auto* x = ctx.Input<LoDTensor>("X");
-    auto* in_row = ctx.Input<LoDTensor>("ROW");
-    auto* in_col = ctx.Input<LoDTensor>("COLUMN");
-    auto* col = ctx.Input<LoDTensor>("Col");
+    auto* x = ctx.Input<phi::DenseTensor>("X");
+    auto* in_row = ctx.Input<phi::DenseTensor>("ROW");
+    auto* in_col = ctx.Input<phi::DenseTensor>("COLUMN");
+    auto* col = ctx.Input<phi::DenseTensor>("Col");
 
     int input_channel = ctx.Attr<int>("InputChannel");
     int kernel_h = ctx.Attr<int>("KernelH");
@@ -401,7 +405,7 @@ class CPUVarConv2dOPGradKernel : public framework::OpKernel<T> {
     int stride_h = ctx.Attr<int>("StrideH");
     int stride_w = ctx.Attr<int>("StrideW");
 
-    auto* dx = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    auto* dx = ctx.Output<phi::DenseTensor>(framework::GradVarName("X"));
 
     auto* dx_data = dx->mutable_data<T>(ctx.GetPlace());
     memset(dx_data, 0.0, x->dims()[0] * x->dims()[1] * sizeof(T));
@@ -450,18 +454,18 @@ class CPUVarConv2dOPGradKernel : public framework::OpKernel<T> {
   }
 
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* x = ctx.Input<LoDTensor>("X");
+    auto* x = ctx.Input<phi::DenseTensor>("X");
     auto* w = ctx.Input<Tensor>("W");
-    auto* col = ctx.Input<LoDTensor>("Col");
-    auto* out = ctx.Input<LoDTensor>("Out");
+    auto* col = ctx.Input<phi::DenseTensor>("Col");
+    auto* out = ctx.Input<phi::DenseTensor>("Out");
 
     int output_channel = ctx.Attr<int>("OutputChannel");
     int input_channel = ctx.Attr<int>("InputChannel");
     int kernel_h = ctx.Attr<int>("KernelH");
     int kernel_w = ctx.Attr<int>("KernelW");
 
-    auto* d_out = ctx.Input<LoDTensor>(framework::GradVarName("Out"));
-    auto* dx = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    auto* d_out = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"));
+    auto* dx = ctx.Output<phi::DenseTensor>(framework::GradVarName("X"));
     auto* d_w = ctx.Output<Tensor>(framework::GradVarName("W"));
 
     Tensor col_grad;
diff --git a/paddle/fluid/operators/var_conv_2d_op.h b/paddle/fluid/operators/var_conv_2d_op.h
index 84c766767f102686f316c0307c484960660346d8..1a5fa9de2c7ced89443b0f7d6c914a6902caee0d 100644
--- a/paddle/fluid/operators/var_conv_2d_op.h
+++ b/paddle/fluid/operators/var_conv_2d_op.h
@@ -20,7 +20,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using LoD = framework::LoD;
 
 class VarConv2dOP : public framework::OperatorWithKernel {
diff --git a/paddle/fluid/operators/warpctc_op.cc b/paddle/fluid/operators/warpctc_op.cc
index 99059ffaa2897649a0793d0c35a75c98949705ac..3f09b20068975e6d4a2600240645774875f85709 100644
--- a/paddle/fluid/operators/warpctc_op.cc
+++ b/paddle/fluid/operators/warpctc_op.cc
@@ -44,8 +44,8 @@ class WarpCTCOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("Logits",
-             "(2-D LoDTensor) or (3-D Tensor), the "
-             "unscaled probabilities of variable-length sequences."
+             "(2-D phi::DenseTensor) or (3-D phi::DenseTensor), "
+             "the unscaled probabilities of variable-length sequences."
"When is a 2-D Tensor with LoD information, " "it's shape is [Lp, num_classes + 1], " "where Lp is the sum of all input sequences' length " @@ -56,7 +56,7 @@ class WarpCTCOpMaker : public framework::OpProtoAndCheckerMaker { "where max_logit_length is the length of the longest " "logit sequence."); AddInput("Label", - "(2-D LoDTensor) or (2-D Tensor), the " + "(2-D phi::DenseTensor), the " "ground truth of variable-length sequence. " "When it is a 2-D Tensor with LoD information, " "it is of the shape [Lg, 1], where Lg is th sum of "