diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc index ab3b33d411c0e09f37885491e93144a2577d5c40..5dc8709679e25a48f2aa047b0404092ac8c1dc66 100644 --- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc +++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc @@ -1227,11 +1227,11 @@ static std::pair GenerateForwardFunctionContents( // Forward Function Body // According to fwd_inputs_name_pos_map - std::map>> + std::map>> ins = { {"X" , TrySyncToVars(X)}, { "Y" , TrySyncToVars(Y)} }; - std::map>> + std::map>> outs = { {"Out0" , CreateVars(Out0Num)}, {"Out1" @@ -1316,7 +1316,7 @@ static std::pair GenerateForwardFunctionContents( const char* FWD_INS_MAP_TEMPLATE = " std::map>> ins = { " + "std::vector>> ins = { " "%s };\n"; std::string ins_map_str = paddle::string::Sprintf(FWD_INS_MAP_TEMPLATE, ins_contents_str); @@ -1353,8 +1353,9 @@ static std::pair GenerateForwardFunctionContents( if (op_passing_outs_map[op_type].count(output_name)) { const std::string output_var_name = output_name + "Var"; - // Pass Output from function argument(EagerTensor*/vector&), - // in form of shared_ptr/vector> + // Pass Output from function + // argument(EagerVariable*/vector&), + // in form of shared_ptr/vector> if (output.duplicable()) { const char* FWD_NUM_ARG_TEMPLATE = ", std::vector& %s"; @@ -1395,7 +1396,7 @@ static std::pair GenerateForwardFunctionContents( } else { const char* FWD_OUTS_CONTENT_TEMPLATE = "{ \"%s\", " - "{std::make_shared(egr::Controller::Instance()." + "{std::make_shared(egr::Controller::Instance()." "GenerateUniqueName())}},"; outs_contents_str += paddle::string::Sprintf(FWD_OUTS_CONTENT_TEMPLATE, output_name); @@ -1407,7 +1408,7 @@ static std::pair GenerateForwardFunctionContents( const char* FWD_OUTS_MAP_TEMPLATE = " std::map>> outs = { " + "std::vector>> outs = { " "%s };\n"; std::string outs_map_str = paddle::string::Sprintf(FWD_OUTS_MAP_TEMPLATE, outs_contents_str); @@ -1482,7 +1483,7 @@ static std::pair GenerateForwardFunctionContents( generated_function_body += out_tensor_str; } generated_function_body += "\n"; - VLOG(6) << "Converted Output VarBase to EagerTensor(s)"; + VLOG(6) << "Converted Output VarBase to EagerVariable(s)"; // [Generation] Handle core_ops_returns_info core_ops_returns_info[op_type] = return_contents; @@ -1627,7 +1628,7 @@ static std::string GenerateSingleOpBase( const char* BWD_INS_MAP_TEMPLATE = " std::map>> %s = { " + "std::vector>> %s = { " "%s };\n"; std::string ins_map_str = paddle::string::Sprintf(BWD_INS_MAP_TEMPLATE, ins_name, ins_contents_str); @@ -1704,7 +1705,7 @@ static std::string GenerateSingleOpBase( } else { const char* GRAD_OUTS_CONTENT_TEMPLATE = "{ \"%s\", " - "{std::make_shared(egr::Controller::Instance(" + "{std::make_shared(egr::Controller::Instance(" ")." 
"GenerateUniqueName())}},"; outs_contents_str += paddle::string::Sprintf( @@ -1723,7 +1724,7 @@ static std::string GenerateSingleOpBase( const char* BWD_OUTS_MAP_TEMPLATE = " std::map>> %s = { " + "std::vector>> %s = { " "%s };\n"; std::string outs_map_str = paddle::string::Sprintf( BWD_OUTS_MAP_TEMPLATE, outs_name, outs_contents_str); diff --git a/paddle/fluid/eager/eager_tensor.h b/paddle/fluid/eager/eager_tensor.h index 2326ab012e3caef34b6b70950dcc1088111ab9e5..19ce457df60cba5e1a1a044f0c7f43a7cbda06d9 100644 --- a/paddle/fluid/eager/eager_tensor.h +++ b/paddle/fluid/eager/eager_tensor.h @@ -40,36 +40,28 @@ * **/ namespace egr { -class EagerTensor final { +class EagerVariable final { public: /* Default constructor and name constructor should only be used for contruct * output and in fluid*/ - EagerTensor() = default; + EagerVariable() = default; - explicit EagerTensor(const std::string& name) : name_(name) {} + explicit EagerVariable(const std::string& name) : name_(name) {} - explicit EagerTensor(const paddle::experimental::Tensor& tensor) + explicit EagerVariable(const paddle::experimental::Tensor& tensor) : name_(tensor.name()) { if (tensor.defined()) { if (tensor.is_dense_tensor()) { - auto* framework_tensor = - var_.GetMutable(); - // Contruct framework::Tensor from egr::EagerTensor - auto tensor_dense = - std::dynamic_pointer_cast(tensor.impl()); - PADDLE_ENFORCE_EQ((tensor_dense.get() && tensor_dense), true, - paddle::platform::errors::Fatal( - "Failed to Trans Tensor to EagerVariable since " - "we got Tensor with type DenseTensor, and we got " - "EagerVariable with another type.")); - *framework_tensor = *tensor_dense; + ConstructVariableFromTensor(tensor); + } else if (tensor.is_selected_rows()) { + ConstructVariableFromSelectedRows(tensor); } else { PADDLE_THROW(paddle::platform::errors::Fatal( "Unrecognized egr::EagerVariable type, only " - "DenseTensor and SelectedRows is supported for now.")); + "DenseTensor and SelectedRows are supported for now.")); } } else { - VLOG(6) << "Build Empty EagerTensor with name " << name_; + VLOG(6) << "Build Empty EagerVariable with name " << name_; } } @@ -77,21 +69,20 @@ class EagerTensor final { std::shared_ptr GetTensorBase() { // Construct allocation only once. 
if (var_.IsInitialized()) { - if (var_.IsType()) { - return SetImplWithLegacyTensor(); - } else if (var_.IsType()) { - return SetImplWithLegacyTensor(); + if (var_.IsType() || + var_.IsType()) { + return SetImplWithLegacyTensor(); } else if (var_.IsType()) { - return SetImplWithSelectedRows(); + return SetImplWithLegacySelectedRows(); } else { PADDLE_THROW(paddle::platform::errors::Fatal( "Unable to fetch underlying tensor " - "from EagerTensor, only LoDTensor and " + "from EagerVariable, only LoDTensor and " "Tensor are supported for now")); } } else { PADDLE_THROW(paddle::platform::errors::Fatal( - "Can not Sync EagerTensor %s whose paddle::framework::Variable is " + "Can not Sync EagerVariable %s whose paddle::framework::Variable is " "not initialized!", name())); } @@ -107,23 +98,52 @@ class EagerTensor final { void set_name(const std::string& name) { name_ = name; } private: - template std::shared_ptr SetImplWithLegacyTensor() { - const auto& framework_tensor = var_.Get(); + const auto& framework_tensor = var_.Get(); VLOG(8) << "Sync Var to tensor for: " << name(); - return std::make_shared(std::move(framework_tensor)); + return std::make_shared(framework_tensor); } - std::shared_ptr SetImplWithSelectedRows() { - auto* selected_rows = var_.GetMutable(); - auto res = std::make_shared(selected_rows->rows_, - selected_rows->height_); - res->value_.reset(selected_rows->value_.release()); - res->id_to_index_ = std::move(selected_rows->id_to_index_); - res->rwlock_.reset(selected_rows->rwlock_.release()); + std::shared_ptr SetImplWithLegacySelectedRows() { + auto* framework_tensor = var_.GetMutable(); + VLOG(8) << "Sync SelectedRows to tensor for: " << name(); + auto res = + std::make_shared(std::move(*framework_tensor)); + var_.Clear(); return res; } + void ConstructVariableFromTensor(const paddle::experimental::Tensor& tensor) { + auto* framework_tensor = var_.GetMutable(); + // Contruct framework::Tensor from egr::EagerVariable + auto tensor_dense = + std::dynamic_pointer_cast(tensor.impl()); + PADDLE_ENFORCE_EQ( + (tensor_dense.get() && tensor_dense), true, + paddle::platform::errors::Fatal( + "Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. " + "Or it holds empty impl, this should not happend since we should " + "treat all kinds of tensor as what they are.", + tensor.name())); + *framework_tensor = *tensor_dense; + } + + void ConstructVariableFromSelectedRows( + const paddle::experimental::Tensor& tensor) { + auto* framework_tensor = var_.GetMutable(); + // Contruct framework::Tensor from egr::EagerVariable + auto tensor_dense = + std::dynamic_pointer_cast(tensor.impl()); + PADDLE_ENFORCE_EQ( + (tensor_dense.get() && tensor_dense), true, + paddle::platform::errors::Fatal( + "Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. 
" + "Or it holds empty impl, this should not happend since we should " + "treat all kinds of tensor as what they are.", + tensor.name())); + *framework_tensor = std::move(*tensor_dense); + } + private: std::string name_{""}; paddle::framework::Variable var_; diff --git a/paddle/fluid/eager/tests/data_structure_tests/eager_tensor_test.cc b/paddle/fluid/eager/tests/data_structure_tests/eager_tensor_test.cc index c27d1871e398164ad976c73919499ceed3938057..e3bb53106776604d1c2fee0a53fc6d87a9d83755 100644 --- a/paddle/fluid/eager/tests/data_structure_tests/eager_tensor_test.cc +++ b/paddle/fluid/eager/tests/data_structure_tests/eager_tensor_test.cc @@ -115,7 +115,7 @@ TEST(Tensor, MemberFunction) { CHECK_EQ(tmp_autograd_meta_test->val_, 2); } -TEST(EagerTensor, Constructor) { +TEST(EagerVariable, Constructor) { paddle::experimental::Tensor t3; pten::DenseTensorMeta meta = pten::DenseTensorMeta( pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2})); @@ -134,7 +134,7 @@ TEST(EagerTensor, Constructor) { CHECK_EQ(t3.defined(), false); t3.set_impl(dt); - egr::EagerTensor et3 = egr::EagerTensor(t3); + egr::EagerVariable et3 = egr::EagerVariable(t3); VLOG(6) << "SyncToVar"; CHECK_EQ(et3.Var().Get().data()[0], 5.0f); diff --git a/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc b/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc index c11bd94ee9369f983684be38fbb811d87968791a..dcf06bffebef314840ccb72c30b22bb65e5a80b2 100644 --- a/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc +++ b/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc @@ -167,7 +167,7 @@ TEST(EagerUtils, PassStopGradient) { TEST(EagerUtils, TrySyncToVar) { paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4}); auto tensor = CreateTestCPUTensor(5.0f, ddim); - std::vector> var_bases = { + std::vector> var_bases = { egr::EagerUtils::TrySyncToVar(tensor)}; paddle::framework::Variable* var = var_bases[0]->MutableVar(); @@ -187,7 +187,7 @@ TEST(EagerUtils, TrySyncToVars) { std::vector tensors = { CreateTestCPUTensor(1.0f, ddim), CreateTestCPUTensor(2.0f, ddim)}; - std::vector> var_bases = + std::vector> var_bases = egr::EagerUtils::TrySyncToVars(tensors); { @@ -218,7 +218,7 @@ TEST(EagerUtils, TrySyncToVars) { TEST(EagerUtils, CreateVars) { VLOG(6) << "Check CreateVars"; - std::vector> outs = + std::vector> outs = egr::EagerUtils::CreateVars(2); CHECK_EQ(outs.size(), size_t(2)); CHECK(outs[0]->Var().IsInitialized() == false); diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc index 7be70ff957565b2246e0e0fd8636816633f7e5c8..ec2ac2ee2a6e4c8416aa19fb104ba7b75560ac8e 100644 --- a/paddle/fluid/eager/utils.cc +++ b/paddle/fluid/eager/utils.cc @@ -131,17 +131,17 @@ void EagerUtils::SetOutRankWithSlot(AutogradMeta* target, size_t slot_id) { target->SetSingleOutRankWithSlot(slot_id, 0); } -std::shared_ptr EagerUtils::TrySyncToVar( +std::shared_ptr EagerUtils::TrySyncToVar( const paddle::experimental::Tensor& tensor) { - return std::make_shared(tensor); + return std::make_shared(tensor); } -std::vector> EagerUtils::TrySyncToVars( +std::vector> EagerUtils::TrySyncToVars( const paddle::experimental::Tensor& tensor) { return {TrySyncToVar(tensor)}; } -std::vector> EagerUtils::TrySyncToVars( +std::vector> EagerUtils::TrySyncToVars( paddle::experimental::Tensor* tensor) { PADDLE_ENFORCE_NOT_NULL( tensor, @@ -151,9 +151,9 @@ std::vector> EagerUtils::TrySyncToVars( return {TrySyncToVar(*tensor)}; } -std::vector> EagerUtils::TrySyncToVars( +std::vector> EagerUtils::TrySyncToVars( const 
std::vector& tensors) { - std::vector> res; + std::vector> res; size_t num = tensors.size(); res.reserve(num); for (size_t i = 0; i < num; i++) { @@ -169,9 +169,9 @@ std::vector> EagerUtils::TrySyncToVars( return res; } -std::vector> EagerUtils::TrySyncToVars( +std::vector> EagerUtils::TrySyncToVars( const std::vector& tensors) { - std::vector> res; + std::vector> res; size_t num = tensors.size(); res.reserve(num); for (size_t i = 0; i < num; i++) { @@ -180,19 +180,19 @@ std::vector> EagerUtils::TrySyncToVars( return res; } -std::vector> EagerUtils::CreateVars( +std::vector> EagerUtils::CreateVars( const size_t num) { - std::vector> res; + std::vector> res; res.reserve(num); for (size_t i = 0; i < num; i++) { res.emplace_back( - new EagerTensor(egr::Controller::Instance().GenerateUniqueName())); + new EagerVariable(egr::Controller::Instance().GenerateUniqueName())); } return res; } std::vector EagerUtils::GetOutputs( - const std::vector>& outs) { + const std::vector>& outs) { std::vector res; res.reserve(outs.size()); for (const auto& out : outs) { @@ -209,7 +209,7 @@ std::vector EagerUtils::GetOutputs( } paddle::experimental::Tensor EagerUtils::GetOutput( - const std::shared_ptr& out) { + const std::shared_ptr& out) { PADDLE_ENFORCE_NOT_NULL( out.get(), paddle::platform::errors::Fatal( "Eager Tensor %s is null and cannot be copied. We " @@ -219,7 +219,7 @@ paddle::experimental::Tensor EagerUtils::GetOutput( return paddle::experimental::Tensor(out->GetTensorBase(), out->name()); } -void EagerUtils::OverwriteOutputs(const std::shared_ptr& out, +void EagerUtils::OverwriteOutputs(const std::shared_ptr& out, paddle::experimental::Tensor* tensor) { PADDLE_ENFORCE_NOT_NULL( tensor, paddle::platform::errors::Fatal( @@ -231,7 +231,7 @@ void EagerUtils::OverwriteOutputs(const std::shared_ptr& out, } void EagerUtils::OverwriteOutputs( - const std::vector>& outs, + const std::vector>& outs, const std::vector& tensors) { PADDLE_ENFORCE_EQ( outs.size(), tensors.size(), diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h index b0549488efc8f2e85d5550251bfffc9dac3a1af7..b6540b7e0178e03e3f4d63432e624b0619f9f04c 100644 --- a/paddle/fluid/eager/utils.h +++ b/paddle/fluid/eager/utils.h @@ -88,7 +88,7 @@ class EagerUtils { /** * We have to use autograd_meta and multi_autograd_meta to initialize * autograd_meta for tensor, since we can't init it in - * egr::EagerTensor's + * egr::EagerVariable's * constructor (it's abstract class there) * * **/ @@ -151,34 +151,35 @@ class EagerUtils { // Intermidate needed remove this once we don't need legacy // Inner Method - static std::shared_ptr TrySyncToVar( + static std::shared_ptr TrySyncToVar( const paddle::experimental::Tensor& tensor); // Basic Input - static std::vector> TrySyncToVars( + static std::vector> TrySyncToVars( const paddle::experimental::Tensor& tensor); // Basic Output - static std::vector> TrySyncToVars( + static std::vector> TrySyncToVars( paddle::experimental::Tensor* tensor); // Multi Output - static std::vector> TrySyncToVars( + static std::vector> TrySyncToVars( const std::vector& tensors); // Multi Input - static std::vector> TrySyncToVars( + static std::vector> TrySyncToVars( const std::vector& tensors); // Construct empty output - static std::vector> CreateVars(const size_t num); + static std::vector> CreateVars( + const size_t num); // Construct Tensor From var static std::vector GetOutputs( - const std::vector>& outs); + const std::vector>& outs); static paddle::experimental::Tensor GetOutput( - const std::shared_ptr& out); + 
const std::shared_ptr& out); // Sync Back to origin output Tensor - static void OverwriteOutputs(const std::shared_ptr& out, + static void OverwriteOutputs(const std::shared_ptr& out, paddle::experimental::Tensor* tensor); static void OverwriteOutputs(const paddle::experimental::Tensor& out, paddle::experimental::Tensor* tensor); static void OverwriteOutputs( - const std::vector>& outs, + const std::vector>& outs, const std::vector& tensors); static void OverwriteOutputs( const std::vector& outs, diff --git a/paddle/fluid/imperative/amp_auto_cast.cc b/paddle/fluid/imperative/amp_auto_cast.cc index 0913d54c8359aa48a1fd5213b87ddf632dc595d9..547fa02326bec36858717c8f66a268551423dbaa 100644 --- a/paddle/fluid/imperative/amp_auto_cast.cc +++ b/paddle/fluid/imperative/amp_auto_cast.cc @@ -340,8 +340,8 @@ NameVarMap AutoCastInputs(const std::string& op_type, } template NameVarMap AutoCastInputs( const std::string& op_type, const NameVarMap& ins); -template NameVarMap AutoCastInputs( - const std::string& op_type, const NameVarMap& ins); +template NameVarMap AutoCastInputs( + const std::string& op_type, const NameVarMap& ins); template NameVarMap CastPureFp16Inputs(const std::string& op_type, const NameVarMap& ins) { @@ -384,7 +384,7 @@ NameVarMap CastPureFp16Inputs(const std::string& op_type, } template NameVarMap CastPureFp16Inputs( const std::string& op_type, const NameVarMap& ins); -template NameVarMap CastPureFp16Inputs( - const std::string& op_type, const NameVarMap& ins); +template NameVarMap CastPureFp16Inputs( + const std::string& op_type, const NameVarMap& ins); } // namespace imperative } // namespace paddle diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc index b8c423f77bd235693f8bbf90a00630a8c855e00f..ed455b7fd0314e6d1e5cd38107568d5f8e89f84d 100644 --- a/paddle/fluid/imperative/layer.cc +++ b/paddle/fluid/imperative/layer.cc @@ -177,9 +177,9 @@ std::string LayerDebugString(const std::string& op_type, } std::string LayerDebugString(const std::string& op_type, - const NameVarMap& ins, - const NameVarMap& outs) { - return LayerDebugStringImpl(op_type, ins, outs); + const NameVarMap& ins, + const NameVarMap& outs) { + return LayerDebugStringImpl(op_type, ins, outs); } template @@ -194,11 +194,16 @@ static void SetForwardDataTypeOfGradVars(const NameVarMap& outs) { } } template <> -void SetForwardDataTypeOfGradVars( - const NameVarMap& outs) { +void SetForwardDataTypeOfGradVars( + const NameVarMap& outs) { // In eager mode we don't need this. 
} +void TestSetForwardDataTypeOfGradVarsEager( + const NameVarMap& outs) { + SetForwardDataTypeOfGradVars(outs); +} + VarBase::VarBase(const std::shared_ptr& var) : var_(var), grad_node_(var->GetGradNode()) { if (auto grad_var = var_->GetGradVar()) { @@ -528,12 +533,12 @@ void OpBase::Run(const framework::OperatorBase& op, } void OpBase::Run(const framework::OperatorBase& op, - const NameVarMap& ins, - const NameVarMap& outs, + const NameVarMap& ins, + const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs, const platform::Place& place) { - OpBaseRunImpl(op, ins, outs, attrs, default_attrs, place); + OpBaseRunImpl(op, ins, outs, attrs, default_attrs, place); } void ClearNoNeedBufferInputs(OpBase* op) { diff --git a/paddle/fluid/imperative/op_base.h b/paddle/fluid/imperative/op_base.h index 58c77d0f4b6b7b7328b5d877f5a97410728ce39e..21167605d46029d2eb9d1ea3241f8d868a6a8344 100644 --- a/paddle/fluid/imperative/op_base.h +++ b/paddle/fluid/imperative/op_base.h @@ -185,8 +185,8 @@ class OpBase { const framework::AttributeMap& default_attrs, const platform::Place& place); static void Run(const framework::OperatorBase& op, - const NameVarMap& ins, - const NameVarMap& outs, + const NameVarMap& ins, + const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs, const platform::Place& place); diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc index ae7d0807530618864ff951e388a5d4deaa1765a5..c56f82d0bc08429afa288bf24cd59d264af3e2ce 100644 --- a/paddle/fluid/imperative/prepared_operator.cc +++ b/paddle/fluid/imperative/prepared_operator.cc @@ -89,11 +89,16 @@ void HandleComplexGradToRealGrad(const NameVarMap& outs) { } template <> -void HandleComplexGradToRealGrad( - const NameVarMap& outs) { +void HandleComplexGradToRealGrad( + const NameVarMap& outs) { // TODO(jiabin): Support Complex here. 
} +void TestHandleComplexGradToRealGradEager( + const NameVarMap& outs) { + HandleComplexGradToRealGrad(outs); +} + PreparedOp::PreparedOp(const framework::OperatorBase& op, const framework::RuntimeContext& ctx, const framework::OpKernelType& kernel_type, @@ -322,14 +327,14 @@ PreparedOp PreparedOp::Prepare(const NameVarMap& ins, default_attrs); } -PreparedOp PreparedOp::Prepare(const NameVarMap& ins, - const NameVarMap& outs, +PreparedOp PreparedOp::Prepare(const NameVarMap& ins, + const NameVarMap& outs, const framework::OperatorWithKernel& op, const platform::Place& place, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs) { - return PrepareImpl(ins, outs, op, place, attrs, - default_attrs); + return PrepareImpl(ins, outs, op, place, attrs, + default_attrs); } template static void PreparedOpRunImpl( @@ -461,18 +466,18 @@ void PreparedOp::Run(const NameVarMap& ins, } } -void PreparedOp::Run(const NameVarMap& ins, - const NameVarMap& outs, +void PreparedOp::Run(const NameVarMap& ins, + const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs) { if (run_pten_kernel_) { - PreparedOpRunPtImpl( + PreparedOpRunPtImpl( op_, kernel_type_, pt_kernel_signature_, pt_kernel_, dev_ctx_, ins, outs, attrs, default_attrs); } else { - PreparedOpRunImpl(op_, ctx_, kernel_type_, func_, - dev_ctx_, ins, outs, attrs, - default_attrs); + PreparedOpRunImpl(op_, ctx_, kernel_type_, func_, + dev_ctx_, ins, outs, attrs, + default_attrs); } } diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h index 9a4b197685ae152ab401fc56693a7a8363e2b75c..a6b80e0d4e1927a8012ff90d54ef71857d504fc6 100644 --- a/paddle/fluid/imperative/prepared_operator.h +++ b/paddle/fluid/imperative/prepared_operator.h @@ -63,8 +63,8 @@ void SetForwardDataTypeOfGradVar(const std::shared_ptr& var) { } template <> -void SetForwardDataTypeOfGradVar( - const std::shared_ptr& var) { +void SetForwardDataTypeOfGradVar( + const std::shared_ptr& var) { VLOG(10) << "Var in Eager dose not support SetForwardDataTypeOfGradVar: " << var->name(); // TODO(jiabin): SetForwardDataType of Grad var is not supported yet in @@ -171,8 +171,8 @@ class PreparedOp { const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs); - static PreparedOp Prepare(const NameVarMap& ins, - const NameVarMap& outs, + static PreparedOp Prepare(const NameVarMap& ins, + const NameVarMap& outs, const framework::OperatorWithKernel& op, const platform::Place& place, const framework::AttributeMap& attrs, @@ -187,8 +187,8 @@ class PreparedOp { const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs); - void Run(const NameVarMap& ins, - const NameVarMap& outs, + void Run(const NameVarMap& ins, + const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs); diff --git a/paddle/fluid/imperative/tests/test_eager.cc b/paddle/fluid/imperative/tests/test_eager.cc index d34cb924d566322a4d37555a64281688ae8a116d..57a2149b23c1bef678bc262d1bb009ed6cfeb572 100644 --- a/paddle/fluid/imperative/tests/test_eager.cc +++ b/paddle/fluid/imperative/tests/test_eager.cc @@ -31,8 +31,8 @@ namespace paddle { namespace imperative { extern std::string LayerDebugString(const std::string& op_type, - const NameVarMap& ins, - const NameVarMap& outs); + const NameVarMap& ins, + const NameVarMap& outs); extern std::shared_ptr CreateGradOpNode( const framework::OperatorBase& op, const NameTensorMap& 
ins, @@ -41,20 +41,21 @@ extern std::shared_ptr CreateGradOpNode( const std::map& inplace_map); TEST(test_eager, eager_debug) { - std::shared_ptr x_in(new egr::EagerTensor("x_in")); - std::shared_ptr y_in(new egr::EagerTensor("y_in")); - std::shared_ptr vout(new egr::EagerTensor("vout")); - imperative::NameVarMap ins = {{"X", {x_in}}, {"Y", {y_in}}}; - imperative::NameVarMap outs = {{"Out", {vout}}}; + std::shared_ptr x_in(new egr::EagerVariable("x_in")); + std::shared_ptr y_in(new egr::EagerVariable("y_in")); + std::shared_ptr vout(new egr::EagerVariable("vout")); + imperative::NameVarMap ins = {{"X", {x_in}}, + {"Y", {y_in}}}; + imperative::NameVarMap outs = {{"Out", {vout}}}; LayerDebugString("mul", ins, outs); } TEST(test_create_node, eager_node) { auto op = framework::OpRegistry::CreateOp("mul", {}, {}, {}, false); framework::Scope scope; auto ctx = framework::RuntimeContext({}, {}); - imperative::NameVarMap ins = {{"X", {nullptr}}, - {"Y", {nullptr}}}; - imperative::NameVarMap outs = {{"Out", {nullptr}}}; + imperative::NameVarMap ins = {{"X", {nullptr}}, + {"Y", {nullptr}}}; + imperative::NameVarMap outs = {{"Out", {nullptr}}}; CreateGradOpNode((*op.get()), ins, outs, framework::AttributeMap{}, framework::AttributeMap{}, platform::CPUPlace(), {}); } @@ -72,26 +73,26 @@ TEST(test_var_helper, eager_var_helper) { ASSERT_ANY_THROW( InitializeVariable(&var8, paddle::framework::proto::VarType::FP64)); - auto egr_tensor = std::make_shared(); - auto egr_tensor2 = std::make_shared(); + auto egr_tensor = std::make_shared(); + auto egr_tensor2 = std::make_shared(); egr_tensor->MutableVar() ->GetMutable() ->mutable_value() ->mutable_data(platform::CPUPlace()); egr_tensor2->MutableVar()->GetMutable(); VLOG(6) << "egr_tensor create with "; - ASSERT_TRUE(platform::is_cpu_place(GetPlace(egr_tensor))); - ASSERT_TRUE(GetDataType(egr_tensor) == + ASSERT_TRUE(platform::is_cpu_place(GetPlace(egr_tensor))); + ASSERT_TRUE(GetDataType(egr_tensor) == framework::proto::VarType::FP32); - GetCachedValue( + GetCachedValue( egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32, platform::CPUPlace())); - SetCachedValue( + SetCachedValue( egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32, platform::CPUPlace()), egr_tensor2); - ASSERT_ANY_THROW(GetPlace(egr_tensor2)); - ASSERT_ANY_THROW(SetType( + ASSERT_ANY_THROW(GetPlace(egr_tensor2)); + ASSERT_ANY_THROW(SetType( egr_tensor, paddle::framework::proto::VarType::LOD_TENSOR_ARRAY)); } } // namespace imperative diff --git a/paddle/fluid/imperative/tests/test_layer.cc b/paddle/fluid/imperative/tests/test_layer.cc index bcd4e62e57c270c5af0e6f5632fdc5f4f803fb29..224b8228097c475bac5bb1c62d126699d975ae66 100644 --- a/paddle/fluid/imperative/tests/test_layer.cc +++ b/paddle/fluid/imperative/tests/test_layer.cc @@ -39,6 +39,8 @@ using vb_vector = std::vector>; using var_pair = std::pair; +extern void TestSetForwardDataTypeOfGradVarsEager( + const NameVarMap& outs); template class TestRuntimeInferVarTypeContext : public RuntimeInferVarTypeContext { @@ -406,6 +408,11 @@ TEST(test_layer, test_inner_op_not_inited) { ASSERT_THROW(op.CheckAttrs(), platform::EnforceNotMet); } +TEST(test_layer, test_eager) { + imperative::NameTensorMap ins = {}; + TestSetForwardDataTypeOfGradVarsEager(ins); +} + } // namespace imperative } // namespace paddle diff --git a/paddle/fluid/imperative/tests/test_prepare_op.cc b/paddle/fluid/imperative/tests/test_prepare_op.cc index fa52aa6d0af61578e18d51e8b95c13b5d383c858..a440a1f486a0c75f299a7692b61b87d393780eb6 100644 --- 
a/paddle/fluid/imperative/tests/test_prepare_op.cc +++ b/paddle/fluid/imperative/tests/test_prepare_op.cc @@ -32,6 +32,9 @@ namespace framework = paddle::framework; namespace paddle { namespace imperative { +extern void TestHandleComplexGradToRealGradEager( + const NameVarMap& outs); + static framework::VariableNameMap CreateVarNameMap( const framework::OpInfo& op_info, const std::string& op_type, const NameVarBaseMap& varbase_map, bool is_input) { @@ -209,6 +212,11 @@ TEST(test_prepare_op, test_prepare_data_same_place) { TestPrepareDataSamePlace({}); } +TEST(test_prepare_op, test_complex_eager) { + NameVarMap outs = {}; + TestHandleComplexGradToRealGradEager(outs); +} + #ifdef PADDLE_WITH_MKLDNN TEST(test_prepare_op, test_prepare_data_cpu_mkldnn) { TestPrepareDataSamePlace({{"use_mkldnn", true}}); diff --git a/paddle/fluid/imperative/tests/test_tracer.cc b/paddle/fluid/imperative/tests/test_tracer.cc index e26cacb894836812a4f5e99ae469a95a959cf736..ccce360269153ba2e8c6586b934f6a9bf6ace819 100644 --- a/paddle/fluid/imperative/tests/test_tracer.cc +++ b/paddle/fluid/imperative/tests/test_tracer.cc @@ -37,9 +37,10 @@ namespace paddle { namespace imperative { using vb_vector = std::vector>; - using var_pair = std::pair; +using ev_vector = std::vector>; +using ev_pair = std::pair; TEST(test_tracer, test_trace_op) { // Doing an mul imperative::Tracer tracer; @@ -546,6 +547,44 @@ TEST(test_tracer, test_execution_context) { ASSERT_EQ(dy_ctx.OutputName("Out"), framework::kEmptyVarName); } +TEST(test_tracer, eager_tracer) { + // Doing an mul + imperative::Tracer tracer; + std::shared_ptr x_in(new egr::EagerVariable("x_in")); + std::shared_ptr y_in(new egr::EagerVariable("y_in")); + std::shared_ptr vout(new egr::EagerVariable("vout")); + platform::CPUPlace place; + std::vector src_data(10, 2.0); + std::vector dims1 = {2, 5}; + std::vector dims2 = {5, 2}; + + auto* x_in_tensor = x_in->MutableVar()->GetMutable(); + auto* y_in_tensor = y_in->MutableVar()->GetMutable(); + x_in_tensor->Resize(framework::make_ddim(dims1)); + auto* mutable_x = x_in_tensor->mutable_data(place); + paddle::memory::Copy(place, mutable_x, place, src_data.data(), + sizeof(float) * src_data.size()); + y_in_tensor->Resize(framework::make_ddim(dims2)); + auto* mutable_y = y_in_tensor->mutable_data(place); + paddle::memory::Copy(place, mutable_y, place, src_data.data(), + sizeof(float) * src_data.size()); + + ev_pair x_pair = ev_pair("X", ev_vector(1, x_in)); + ev_pair y_pair = ev_pair("Y", ev_vector(1, y_in)); + ev_pair out_pair = ev_pair("Out", ev_vector(1, vout)); + imperative::NameTensorMap ins = {x_pair, y_pair}; + imperative::NameTensorMap outs = {out_pair}; + framework::AttributeMap mul_attr_map; + mul_attr_map["use_mkldnn"] = false; + tracer.TraceOp("mul", ins, outs, mul_attr_map, place, + true); + + const auto& out_tensor = vout->Var().Get(); + for (int i = 0; i < vout->Var().Get().numel(); i++) { + ASSERT_EQ(out_tensor.data()[i], 20.0); + } +} + } // namespace imperative } // namespace paddle diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc index c2dd761c23c9f4a914f428d8e0bdb16d9b4a6cbf..a600720ef78edb5175bb7d17821f5d8e229d1a93 100644 --- a/paddle/fluid/imperative/tracer.cc +++ b/paddle/fluid/imperative/tracer.cc @@ -168,7 +168,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap& ins, const platform::Place& place, bool trace_backward, const std::map& inplace_map, paddle::framework::AttributeMap* passed_default_attrs_, - bool override_default_attr_map) { + bool 
use_default_attr_map) { platform::RecordEvent op_type_record_event(type); platform::ScopedFlushDenormal flush; VLOG(1) << "Trace Op: " << type; @@ -244,7 +244,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap& ins, "CustomPlace.")); #endif } - if (!override_default_attr_map) { + if (!use_default_attr_map) { PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_, paddle::platform::errors::PermissionDenied( "Detected default_attrs = nullptr.")); @@ -280,16 +280,14 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap& ins, } if (ComputeRequiredGrad(new_ins, outs, trace_backward)) { - if (!override_default_attr_map) { - PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_, - paddle::platform::errors::PermissionDenied( - "Detected default_attrs = nullptr.")); - CreateGradOpNode(*op, new_ins, outs, attrs, *passed_default_attrs_, place, - inplace_map); - } else { - CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place, - inplace_map); - } + PADDLE_ENFORCE_EQ( + passed_default_attrs_, nullptr, + paddle::platform::errors::PermissionDenied( + "We expect passed_default_attrs_ is nullptr while " + "use_default_attr_map is true, however we got not null " + "passed_default_attrs_. Please check your usage of trace_op. ")); + CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place, + inplace_map); } else { VLOG(3) << "No Grad to track for Op: " << type; } @@ -301,16 +299,14 @@ template void Tracer::TraceOp( const NameVarMap& outs, framework::AttributeMap attrs, const platform::Place& place, bool trace_backward, const std::map& inplace_map, - paddle::framework::AttributeMap* default_attrs, - bool override_default_attr_map); + paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map); -template void Tracer::TraceOp( - const std::string& type, const NameVarMap& ins, - const NameVarMap& outs, framework::AttributeMap attrs, +template void Tracer::TraceOp( + const std::string& type, const NameVarMap& ins, + const NameVarMap& outs, framework::AttributeMap attrs, const platform::Place& place, bool trace_backward, const std::map& inplace_map_, - paddle::framework::AttributeMap* default_attrs, - bool override_default_attr_map); + paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map); void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins, const NameVarBaseMap& outs, framework::AttributeMap attrs, @@ -324,13 +320,12 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins, paddle::framework::AttributeMap attrs, const paddle::platform::Place& place, paddle::framework::AttributeMap* default_attrs, - bool override_default_attr_map, + bool use_default_attr_map, const std::map& inplace_map) { - VLOG(6) << "Running On Eager TraceOp with override_default_attr_map: " - << override_default_attr_map; - TraceOp(type, ins, outs, std::move(attrs), place, false, - inplace_map, default_attrs, - override_default_attr_map); + VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: " + << use_default_attr_map; + TraceOp(type, ins, outs, std::move(attrs), place, false, + inplace_map, default_attrs, use_default_attr_map); } void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins, @@ -338,8 +333,9 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins, paddle::framework::AttributeMap attrs, const std::map& inplace_map) { VLOG(6) << "Running On Eager TraceOp(less): "; - TraceOp(type, ins, outs, std::move(attrs), expected_place_, - false, inplace_map, nullptr, true); + TraceOp(type, ins, 
outs, std::move(attrs), + expected_place_, false, inplace_map, nullptr, + true); } void Tracer::SetExpectedPlace(platform::Place place) { diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h index 4e406a9482da0da456ad43046e48b97232dff885..3a9a1b630ce9cbc89f57b746e6e1e1445f6bd318 100644 --- a/paddle/fluid/imperative/tracer.h +++ b/paddle/fluid/imperative/tracer.h @@ -69,7 +69,7 @@ class Tracer { const platform::Place& place, bool trace_backward, const std::map& inplace_map = {}, paddle::framework::AttributeMap* passed_default_attrs_ = nullptr, - bool override_default_attr_map = true); + bool use_default_attr_map = true); void TraceOp(const std::string& type, const NameVarBaseMap& ins, const NameVarBaseMap& outs, framework::AttributeMap attrs, @@ -83,7 +83,7 @@ class Tracer { const NameTensorMap& outs, paddle::framework::AttributeMap attrs, const paddle::platform::Place& place, paddle::framework::AttributeMap* default_attrs, - bool override_default_attr_map, + bool use_default_attr_map, const std::map& inplace_map = {}); bool ComputeRequiredGrad(const NameVarBaseMap& ins, diff --git a/paddle/fluid/imperative/var_helper.cc b/paddle/fluid/imperative/var_helper.cc index 3548f2eeafd24126b50329246dd85f2f0e47878b..d97f7c1ee19b33e75b11d8f7541e638c93d152f0 100644 --- a/paddle/fluid/imperative/var_helper.cc +++ b/paddle/fluid/imperative/var_helper.cc @@ -95,8 +95,8 @@ template const paddle::platform::Place &GetPlace( const std::shared_ptr &var); template const paddle::platform::Place &GetPlace( const std::shared_ptr &var); -template const paddle::platform::Place &GetPlace( - const std::shared_ptr &var); +template const paddle::platform::Place &GetPlace( + const std::shared_ptr &var); /* GetNameFromVar */ template @@ -104,8 +104,8 @@ const std::string &GetNameFromVar(std::shared_ptr var) { return var->Name(); } template <> -const std::string &GetNameFromVar( - std::shared_ptr tensor) { +const std::string &GetNameFromVar( + std::shared_ptr tensor) { return tensor->name(); } template const std::string &GetNameFromVar( @@ -120,8 +120,8 @@ void SetType(std::shared_ptr var, var->SetType(type); } template <> -void SetType(std::shared_ptr var, - framework::proto::VarType::Type type) { +void SetType(std::shared_ptr var, + framework::proto::VarType::Type type) { switch (type) { case paddle::framework::proto::VarType::LOD_TENSOR: { var->MutableVar()->GetMutable(); @@ -149,8 +149,8 @@ framework::proto::VarType::Type GetType(std::shared_ptr var) { return var->Type(); } template <> -framework::proto::VarType::Type GetType( - std::shared_ptr var) { +framework::proto::VarType::Type GetType( + std::shared_ptr var) { if (var->Var().IsInitialized()) { return paddle::framework::ToVarType(var->Var().Type()); } else { @@ -168,8 +168,8 @@ framework::proto::VarType::Type GetDataType(std::shared_ptr var) { return var->DataType(); } template <> -framework::proto::VarType::Type GetDataType( - std::shared_ptr var) { +framework::proto::VarType::Type GetDataType( + std::shared_ptr var) { if (var->Var().IsType()) { return framework::TransToProtoVarType( var->Var().Get().value().type()); @@ -197,8 +197,8 @@ bool CheckCachedKey(std::shared_ptr var, return GetVariableWrapper(var)->hasCacheKey(key); } template <> -bool CheckCachedKey( - std::shared_ptr tensor, +bool CheckCachedKey( + std::shared_ptr tensor, const paddle::framework::OpKernelType &key) { // TODO(jiabin): Support this later // VLOG(10) << "CheckCachedKey with tensor: " << tensor->name() << "and key is @@ -219,7 +219,7 @@ std::shared_ptr 
GetCachedValue( } template <> std::shared_ptr GetCachedValue( - std::shared_ptr var, + std::shared_ptr var, const paddle::framework::OpKernelType &key) { // TODO(jiabin): Support this later // PADDLE_THROW(platform::errors::Fatal("In eager mode program should not @@ -243,10 +243,10 @@ void SetCachedValue(std::shared_ptr var, GetVariableWrapper(var)->setCacheValue(key, GetVariableWrapper(res)); } template <> -void SetCachedValue( - std::shared_ptr tensor, +void SetCachedValue( + std::shared_ptr tensor, const paddle::framework::OpKernelType &key, - std::shared_ptr res) { + std::shared_ptr res) { // PADDLE_THROW(platform::errors::Fatal("In eager mode program should not // reach this, support cache and remove this error check later, or this // should not be supported.")); diff --git a/paddle/fluid/imperative/var_helper.h b/paddle/fluid/imperative/var_helper.h index ff228e0ab84e2aec8d3d399bc1e5ba9cb14b42c2..cbcc1a9f99daaa16d0dfc5c79f610434dd4e33a5 100644 --- a/paddle/fluid/imperative/var_helper.h +++ b/paddle/fluid/imperative/var_helper.h @@ -18,7 +18,7 @@ #include "paddle/fluid/framework/variable.h" namespace egr { -class EagerTensor; +class EagerVariable; } // namespace egr namespace pten { class DenseTensor; diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc index f4e5df800dadaa9774062f704fb93b7a0ac746a9..6e882b5e0e4b07dd67a6b59747d2a89a6cc59fb7 100644 --- a/paddle/fluid/pybind/eager.cc +++ b/paddle/fluid/pybind/eager.cc @@ -45,7 +45,7 @@ PyTypeObject* p_tensor_type; extern PyTypeObject* g_vartype_pytype; extern PyTypeObject* g_framework_tensor_pytype; -PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) { +PyObject* TensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) { PyObject* obj = type->tp_alloc(type, 0); if (obj) { auto v = reinterpret_cast(obj); @@ -56,14 +56,14 @@ PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) { } // TODO(jiabin): Overload this once we need more constructor in Python -void EmptyEagerTensorInitializer( - TensorObject* self, const std::string& name, - const paddle::platform::Place& place, bool persistable = false, - bool stop_gradient = true, framework::proto::VarType::Type dtype = - paddle::framework::proto::VarType::FP32, - const std::vector& dims = {}, - framework::proto::VarType::Type var_type = - paddle::framework::proto::VarType::LOD_TENSOR) { +void EmptyTensorInitializer(TensorObject* self, const std::string& name, + const paddle::platform::Place& place, + bool persistable = false, bool stop_gradient = true, + framework::proto::VarType::Type dtype = + paddle::framework::proto::VarType::FP32, + const std::vector& dims = {}, + framework::proto::VarType::Type var_type = + paddle::framework::proto::VarType::LOD_TENSOR) { auto ddims = paddle::framework::make_ddim(dims); PADDLE_ENFORCE_GE( paddle::framework::product(ddims), 0, @@ -98,46 +98,41 @@ void EmptyEagerTensorInitializer( } } -void InitEagerTensorWithNumpyValue(TensorObject* self, const py::object& array, - bool zero_copy = false) { +void InitTensorWithNumpyValue(TensorObject* self, const py::object& array, + bool zero_copy = false) { PADDLE_ENFORCE_EQ( self->tensor.defined(), true, paddle::platform::errors::Fatal( - "Calling InitEagerTensorWithNumpyValue of Eager Tensor without " - "EmptyEagerTensorInitializer is " + "Calling InitTensorWithNumpyValue of Eager Tensor without " + "EmptyTensorInitializer is " "forbidden. 
Please check your code and make sure you new a " "eager tensor before init it with NumPy.")); pten::DenseTensor* impl_ptr = static_cast(self->tensor.impl().get()); paddle::platform::Place place = impl_ptr->place(); - paddle::framework::LoDTensor temp_tensor = paddle::framework::LoDTensor(); if (platform::is_cpu_place(place)) { - SetTensorFromPyArray(&temp_tensor, array, place, - zero_copy); + SetTensorFromPyArray(impl_ptr, array, place, zero_copy); } else if (platform::is_xpu_place(place)) { - SetTensorFromPyArray(&temp_tensor, array, place, - zero_copy); + SetTensorFromPyArray(impl_ptr, array, place, zero_copy); } else if (platform::is_gpu_place(place)) { - SetTensorFromPyArray(&temp_tensor, array, place, + SetTensorFromPyArray(impl_ptr, array, place, zero_copy); } else if (platform::is_cuda_pinned_place(place)) { - SetTensorFromPyArray(&temp_tensor, array, place, + SetTensorFromPyArray(impl_ptr, array, place, zero_copy); } else if (platform::is_npu_place(place)) { - SetTensorFromPyArray(&temp_tensor, array, place, - zero_copy); + SetTensorFromPyArray(impl_ptr, array, place, zero_copy); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Place should be one of " "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/NPUPlace")); } - *impl_ptr = temp_tensor; } -void InitEagerTensorWithEagerTensor(TensorObject* self, - const paddle::experimental::Tensor& src, - const paddle::platform::Place& place, - const std::string& name) { +void InitTensorWithTensor(TensorObject* self, + const paddle::experimental::Tensor& src, + const paddle::platform::Place& place, + const std::string& name) { self->tensor.set_name(name); if (place == src.inner_place()) { auto impl = std::static_pointer_cast(src.impl()); @@ -158,10 +153,10 @@ void InitEagerTensorWithEagerTensor(TensorObject* self, } } -void InitEagerTensorWithFrameworkTensor(TensorObject* self, - const framework::Tensor& src, - const paddle::platform::Place& place, - const std::string& name) { +void InitTensorWithFrameworkTensor(TensorObject* self, + const framework::Tensor& src, + const paddle::platform::Place& place, + const std::string& name) { self->tensor.set_name(name); if (place == src.place()) { self->tensor.set_impl(std::make_shared(src)); @@ -271,14 +266,14 @@ std::string ParseName(std::unordered_map kws_map, return act_name; } -// initialize EagerTensor by PyArray(first argument is PyArray, +// initialize Tensor by PyArray(first argument is PyArray, // mix args and kwargs) automatically. -void AutoInitEagerTensorByPyArray( - TensorObject* py_tensor_ptr, - std::unordered_map kws_map, PyObject* args, - bool flag_kwargs, Py_ssize_t args_num) { - // The first argument of the EagerTensor constructor is PyArray, - // there are 6 arguments to construct the new EagerTensor, +void AutoInitTensorByPyArray(TensorObject* py_tensor_ptr, + std::unordered_map kws_map, + PyObject* args, bool flag_kwargs, + Py_ssize_t args_num) { + // The first argument of the Tensor constructor is PyArray, + // there are 6 arguments to construct the new Tensor, // kw_order_map's key is every arguments of the constructor, // kw_order_map's value is the position of the arguments respectively. 
// If u want to update this constructor with new arguments, @@ -306,20 +301,21 @@ void AutoInitEagerTensorByPyArray( stop_gradient = ParseBooleanArgs("stop_gradient", kws_map, kw_order_map, args, flag_kwargs, args_num); - EmptyEagerTensorInitializer(py_tensor_ptr, act_name, place, persistable, - stop_gradient); - InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy); + EmptyTensorInitializer(py_tensor_ptr, act_name, place, persistable, + stop_gradient); + InitTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy); } -// initialize EagerTensor by EagerTensor or framework::Tensor (mix args and +// initialize Tensor by Tensor or framework::Tensor (mix args and // kwargs) automatically. -void AutoInitEagerTensorByTensor( - TensorObject* py_tensor_ptr, - std::unordered_map kws_map, PyObject* args, - bool flag_kwargs, Py_ssize_t args_num, bool init_by_egr_tensor = true) { - // The first argument of the EagerTensor constructor is EagerTensor or +void AutoInitTensorByTensor(TensorObject* py_tensor_ptr, + std::unordered_map kws_map, + PyObject* args, bool flag_kwargs, + Py_ssize_t args_num, + bool init_by_egr_tensor = true) { + // The first argument of the Tensor constructor is Tensor or // framework Tensor, - // there are 3 arguments to construct the new EagerTensor, + // there are 3 arguments to construct the new Tensor, // kw_order_map's key is every arguments of the constructor, // kw_order_map's value is the position of the arguments respectively. // If u want to update this constructor with new arguments, @@ -345,14 +341,14 @@ void AutoInitEagerTensorByTensor( src_tensor = CastPyArg2Tensor(kws_map["value"], 0); } else { PADDLE_THROW(platform::errors::InvalidArgument( - "The first expected kwargs is {value: EagerTensor}, " - "but could not parse the first argument {value: EagerTensor} " + "The first expected kwargs is {value: Tensor}, " + "but could not parse the first argument {value: Tensor} " "successfully. " "Please check your input first and make sure you are on the right " "way.")); } } - InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place, act_name); + InitTensorWithTensor(py_tensor_ptr, src_tensor, place, act_name); } else { // init by framework tensor framework::Tensor src_tensor; @@ -372,8 +368,7 @@ void AutoInitEagerTensorByTensor( "way.")); } } - InitEagerTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place, - act_name); + InitTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place, act_name); } } @@ -402,12 +397,12 @@ void AutoInitEagerTensorByTensor( * ** value: ndarray) * 5. * def __init__ ( - * ** tensor: EagerTensor) + * ** tensor: Tensor) * 6. (multi-place) * (should have at least one parameter, one parameter equals to case 5, zero * parameter equals to case 1.) * def __init__ ( - * ** tensor: EagerTensor, + * ** tensor: Tensor, * ** place: paddle::platform::Place, * ** name: std::string) * 7. 
(multi-place) (should have at least one parameter, one parameter similar @@ -417,7 +412,7 @@ void AutoInitEagerTensorByTensor( * ** place: paddle::platform::Place, * ** name: std::string) * **/ -int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { +int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { // set a flag to record use kwargs or not bool flag_kwargs = false; if (kwargs) flag_kwargs = true; @@ -427,7 +422,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { PyObject* kw_persistable = NULL; PyObject* kw_stop_gradient = NULL; - PyObject* kw_value = NULL; // receive PyArray or EagerTensor + PyObject* kw_value = NULL; // receive PyArray or Tensor PyObject* kw_place = NULL; PyObject* kw_name = NULL; PyObject* kw_dims = NULL; @@ -490,7 +485,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { if (!flag_kwargs) { // case 1 VLOG(6) << "Calling case1's initializer."; - EmptyEagerTensorInitializer( + EmptyTensorInitializer( py_tensor_ptr, egr::Controller::Instance().GenerateUniqueName("generated_tensor"), egr::Controller::Instance().GetExpectedPlace()); @@ -499,28 +494,28 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { if (kw_value != NULL) { if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) { VLOG(6) << "Calling case3's or case4's initializer"; - AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, - flag_kwargs, args_num); + AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num); return 0; } else if (PyObject_IsInstance( kw_value, reinterpret_cast(p_tensor_type))) { VLOG(6) << "Calling case5's or case6's initializer"; - AutoInitEagerTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs, - args_num); + AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num); return 0; } else if (PyObject_IsInstance(kw_value, reinterpret_cast( g_framework_tensor_pytype))) { VLOG(6) << "Calling case7's initializer."; - AutoInitEagerTensorByTensor( - py_tensor_ptr, kws_map, args, flag_kwargs, args_num, - /* false means not init by egr tensor*/ false); + AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num, + /* false means not init by egr tensor*/ false); return 0; } else { PADDLE_THROW(platform::errors::InvalidArgument( "Could not parse the first keyword argument successfully, " "the first keyword argument is value, but it should be PyArray " - "or EagerTensor or framework::Tensor. " + "or Tensor or framework::Tensor. " "Please check your input first and make sure you are on the " "right way.")); } @@ -573,18 +568,18 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { CastPyArg2ProtoType(kw_type, 0); bool persistable = CastPyArg2AttrBoolean(kw_persistable, 0); - EmptyEagerTensorInitializer( - py_tensor_ptr, act_name, - egr::Controller::Instance().GetExpectedPlace(), persistable, - /* stop_gradient */ true, dtype, dims, var_type); + EmptyTensorInitializer(py_tensor_ptr, act_name, + egr::Controller::Instance().GetExpectedPlace(), + persistable, + /* stop_gradient */ true, dtype, dims, var_type); return 0; } else { PADDLE_THROW(platform::errors::InvalidArgument( - "We not only support construct EagerTensor from numpy value " - "or tensor(EagerTensor or framework::Tensor) " + "We not only support construct Tensor from numpy value " + "or tensor(Tensor or framework::Tensor) " "with python kwargs by this initializer, " - "but also even support dtype to init a empty EagerTensor. 
" + "but also even support dtype to init a empty Tensor. " "Please check your input first and make sure you call the existed " "constructor.")); } @@ -595,28 +590,28 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { VLOG(6) << "Calling case3's or case4's initializer."; - AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, - args_num); + AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num); return 0; } else if (PyObject_IsInstance( arg0_ptr, reinterpret_cast(p_tensor_type))) { VLOG(6) << "Calling case5's or case6's initializer."; - AutoInitEagerTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs, - args_num); + AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num); return 0; } else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast( g_framework_tensor_pytype))) { VLOG(6) << "Calling case7's initializer."; - AutoInitEagerTensorByTensor( - py_tensor_ptr, kws_map, args, flag_kwargs, args_num, - /* false means not init by egr tensor*/ false); + AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num, + /* false means not init by egr tensor*/ false); return 0; } else { PADDLE_THROW(platform::errors::InvalidArgument( - "We support construct EagerTensor from numpy value " - "or tensor(EagerTensor or framework::Tensor) " + "We support construct Tensor from numpy value " + "or tensor(Tensor or framework::Tensor) " "with python args and kwargs by this initializer, " - "but the first argument should be PyArray or EagerTensor or " + "but the first argument should be PyArray or Tensor or " "framework::Tensor. " "Please check your input first and make sure you call the existed " "constructor.")); @@ -626,8 +621,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { VLOG(6) << "Calling case3's or case4's initializer."; - AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, - args_num); + AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num); return 0; } else { PADDLE_THROW(platform::errors::InvalidArgument( @@ -658,15 +653,14 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { paddle::framework::proto::VarType::Type var_type = CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 3), 3); bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4); - EmptyEagerTensorInitializer( - py_tensor_ptr, act_name, - egr::Controller::Instance().GetExpectedPlace(), persistable, true, - dtype, dims, var_type); + EmptyTensorInitializer(py_tensor_ptr, act_name, + egr::Controller::Instance().GetExpectedPlace(), + persistable, true, dtype, dims, var_type); return 0; } else if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { VLOG(6) << "Calling case3's initializer."; - AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, - args_num); + AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num); return 0; } else { PADDLE_THROW(platform::errors::InvalidArgument( @@ -680,8 +674,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { VLOG(6) << "Calling case3's or case4's initializer"; - 
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, - args_num); + AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num); return 0; } else { PADDLE_THROW(platform::errors::InvalidArgument( @@ -696,8 +690,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { if (!flag_kwargs) { // case 3 VLOG(6) << "Calling case3's initializer."; - AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, - args_num); + AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, + args_num); return 0; } else { // six position args, remainting arguments are kwargs, but this // is not a right way @@ -716,7 +710,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { return 1; } -static void EagerTensorDealloc(TensorObject* self) { +static void TensorDealloc(TensorObject* self) { self->tensor.~Tensor(); Py_TYPE(self)->tp_free(reinterpret_cast(self)); } @@ -735,19 +729,19 @@ void BindEager(pybind11::module* module) { auto& internals = pybind11::detail::get_internals(); auto heap_type = reinterpret_cast( internals.default_metaclass->tp_alloc(internals.default_metaclass, 0)); - heap_type->ht_name = ToPyObject("EagerTensor"); - heap_type->ht_qualname = ToPyObject("EagerTensor"); + heap_type->ht_name = ToPyObject("Tensor"); + heap_type->ht_qualname = ToPyObject("Tensor"); auto type = &heap_type->ht_type; - type->tp_name = "EagerTensor"; + type->tp_name = "Tensor"; type->tp_basicsize = sizeof(TensorObject); - type->tp_dealloc = (destructor)EagerTensorDealloc; + type->tp_dealloc = (destructor)TensorDealloc; type->tp_as_number = &number_methods; type->tp_as_sequence = &sequence_methods; type->tp_as_mapping = &mapping_methods; type->tp_methods = variable_methods; type->tp_getset = variable_properties; - type->tp_init = EagerTensorInit; - type->tp_new = EagerTensorNew; + type->tp_init = TensorInit; + type->tp_new = TensorNew; Py_INCREF(internals.instance_base); type->tp_base = reinterpret_cast(internals.instance_base); type->tp_flags |= @@ -764,8 +758,8 @@ void BindEager(pybind11::module* module) { } Py_INCREF(type); - if (PyModule_AddObject(m.ptr(), "EagerTensor", - reinterpret_cast(type)) < 0) { + if (PyModule_AddObject(m.ptr(), "Tensor", reinterpret_cast(type)) < + 0) { Py_DECREF(type); Py_DECREF(m.ptr()); PADDLE_THROW(platform::errors::Fatal( diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc index a32edae2ad23cc215a0e91756fd6b54b145debda..c3f0aa2ec9c49d144f45d73d275c964f341a384b 100644 --- a/paddle/fluid/pybind/eager_functions.cc +++ b/paddle/fluid/pybind/eager_functions.cc @@ -145,9 +145,8 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_api_read_next_eager_tensor_list(PyObject* self, - PyObject* args, - PyObject* kwargs) { +static PyObject* eager_api_read_next_tensor_list(PyObject* self, PyObject* args, + PyObject* kwargs) { EAGER_TRY auto tensor_base_list = CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0); @@ -182,8 +181,8 @@ PyMethodDef variable_functions[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"tensor_copy", (PyCFunction)(void (*)(void))eager_api_tensor_copy, METH_VARARGS | METH_KEYWORDS, NULL}, - {"read_next_eager_tensor_list", - (PyCFunction)(void (*)(void))eager_api_read_next_eager_tensor_list, + {"read_next_tensor_list", + (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list, METH_VARARGS | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL}}; diff --git 
a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc index 68653790366084ed8cce1cb007cd975fd0a4bc59..b8f462dfd51d1234b86a6b294628bbefd8a5c021 100644 --- a/paddle/fluid/pybind/eager_method.cc +++ b/paddle/fluid/pybind/eager_method.cc @@ -35,15 +35,15 @@ limitations under the License. */ namespace paddle { namespace pybind { -extern void InitEagerTensorWithNumpyValue(TensorObject* self, - const pybind11::object& array, - bool zero_copy); +extern void InitTensorWithNumpyValue(TensorObject* self, + const pybind11::object& array, + bool zero_copy); extern PyTypeObject* p_tensor_type; -static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args, + PyObject* kwargs) { + EAGER_TRY PADDLE_ENFORCE_EQ( self->tensor.initialized(), true, platform::errors::InvalidArgument( @@ -99,18 +99,17 @@ static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor_method__is_initialized(TensorObject* self, - PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor_method__is_initialized(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + EAGER_TRY return ToPyObject(self->tensor.initialized()); EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor_method__copy_to(TensorObject* self, - PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args, + PyObject* kwargs) { + EAGER_TRY bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0); auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1); auto cp_tensor = @@ -123,10 +122,10 @@ static PyObject* eager_tensor_method__copy_to(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self, - PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor_method_reconstruct_from_(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + EAGER_TRY paddle::experimental::Tensor src_tensor = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); std::string orig_name = self->tensor.name(); @@ -144,9 +143,9 @@ static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args, + PyObject* kwargs) { + EAGER_TRY paddle::experimental::Tensor src_tensor = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1); @@ -170,8 +169,8 @@ static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args, - PyObject* kwargs) { +static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args, + PyObject* kwargs) { EAGER_TRY if (egr::Controller::Instance().HasGrad()) { auto meta = egr::EagerUtils::autograd_meta(&(self->tensor)); @@ -187,10 +186,9 @@ static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor__clear_gradient(TensorObject* self, - PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* 
tensor__clear_gradient(TensorObject* self, PyObject* args, + PyObject* kwargs) { + EAGER_TRY VLOG(4) << "ClearGradient " << self->tensor.name(); paddle::experimental::Tensor* grad; @@ -223,8 +221,8 @@ static PyObject* eager_tensor__clear_gradient(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args, - PyObject* kwargs) { +static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args, + PyObject* kwargs) { EAGER_TRY VLOG(4) << "ZeroGrads " << self->tensor.name(); @@ -257,10 +255,9 @@ static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor__share_buffer_to(TensorObject* self, - PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args, + PyObject* kwargs) { + EAGER_TRY paddle::experimental::Tensor* dst_ptr = &(reinterpret_cast(PyTuple_GET_ITEM(args, 0))->tensor); PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, @@ -279,10 +276,10 @@ static PyObject* eager_tensor__share_buffer_to(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self, - PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor__is_shared_buffer_with(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + EAGER_TRY paddle::experimental::Tensor* dst_ptr = &(reinterpret_cast(PyTuple_GET_ITEM(args, 0))->tensor); PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, @@ -303,10 +300,10 @@ static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self, - PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor__share_underline_tensor_to(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + EAGER_TRY paddle::experimental::Tensor* src_ptr = &(reinterpret_cast(PyTuple_GET_ITEM(args, 0))->tensor); PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, @@ -320,9 +317,10 @@ static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor__is_shared_underline_tensor_with( - TensorObject* self, PyObject* args, PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + EAGER_TRY paddle::experimental::Tensor src_tensor = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); PADDLE_ENFORCE_EQ(src_tensor.initialized(), true, @@ -339,9 +337,9 @@ static PyObject* eager_tensor__is_shared_underline_tensor_with( EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor_method_detach(TensorObject* self, PyObject* args, + PyObject* kwargs) { + EAGER_TRY PADDLE_ENFORCE_EQ( self->tensor.initialized(), true, platform::errors::InvalidArgument("Tensor %s has not been initialized!", @@ -365,10 +363,10 @@ static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args, EAGER_CATCH_AND_THROW_RETURN_NULL } -static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self, - PyObject* args, - PyObject* kwargs) { - EAGER_SYNC_TRY +static PyObject* tensor_method_get_underline_tensor(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + 
EAGER_TRY if (self->tensor.is_dense_tensor()) { auto* tensor = static_cast(self->tensor.impl().get()); @@ -382,57 +380,54 @@ static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self, } // NOTE(wuweilong): Set value and not change self's original place -static PyObject* eager_tensor_method_set_value(TensorObject* self, - PyObject* args, - PyObject* kwargs) { +static PyObject* tensor_method_set_value(TensorObject* self, PyObject* args, + PyObject* kwargs) { EAGER_TRY VLOG(4) << "Value " << self->tensor.name(); pybind11::object numpy_value = pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true); - InitEagerTensorWithNumpyValue(self, numpy_value, false); + InitTensorWithNumpyValue(self, numpy_value, false); Py_INCREF(Py_None); return Py_None; EAGER_CATCH_AND_THROW_RETURN_NULL } PyMethodDef variable_methods[] = { - {"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy, + {"numpy", (PyCFunction)(void (*)(void))tensor_method_numpy, METH_VARARGS | METH_KEYWORDS, NULL}, {"_is_initialized", - (PyCFunction)(void (*)(void))eager_tensor_method__is_initialized, + (PyCFunction)(void (*)(void))tensor_method__is_initialized, METH_VARARGS | METH_KEYWORDS, NULL}, - {"_copy_to", (PyCFunction)(void (*)(void))eager_tensor_method__copy_to, + {"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to, METH_VARARGS | METH_KEYWORDS, NULL}, - {"copy_", (PyCFunction)(void (*)(void))eager_tensor_method_copy_, + {"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_, METH_VARARGS | METH_KEYWORDS, NULL}, {"reconstruct_from_", - (PyCFunction)(void (*)(void))eager_tensor_method_reconstruct_from_, + (PyCFunction)(void (*)(void))tensor_method_reconstruct_from_, METH_VARARGS | METH_KEYWORDS, NULL}, - {"retain_grads", (PyCFunction)(void (*)(void))eager_tensor_retain_grads, + {"retain_grads", (PyCFunction)(void (*)(void))tensor_retain_grads, METH_VARARGS | METH_KEYWORDS, NULL}, - {"_clear_gradient", - (PyCFunction)(void (*)(void))eager_tensor__clear_gradient, + {"_clear_gradient", (PyCFunction)(void (*)(void))tensor__clear_gradient, METH_VARARGS | METH_KEYWORDS, NULL}, - {"_zero_grads", (PyCFunction)(void (*)(void))eager_tensor__zero_grads, + {"_zero_grads", (PyCFunction)(void (*)(void))tensor__zero_grads, METH_VARARGS | METH_KEYWORDS, NULL}, - {"_share_buffer_to", - (PyCFunction)(void (*)(void))eager_tensor__share_buffer_to, + {"_share_buffer_to", (PyCFunction)(void (*)(void))tensor__share_buffer_to, METH_VARARGS | METH_KEYWORDS, NULL}, {"_is_shared_buffer_with", - (PyCFunction)(void (*)(void))eager_tensor__is_shared_buffer_with, + (PyCFunction)(void (*)(void))tensor__is_shared_buffer_with, METH_VARARGS | METH_KEYWORDS, NULL}, {"_share_underline_tensor_to", - (PyCFunction)(void (*)(void))eager_tensor__share_underline_tensor_to, + (PyCFunction)(void (*)(void))tensor__share_underline_tensor_to, METH_VARARGS | METH_KEYWORDS, NULL}, {"_is_shared_underline_tensor_with", - (PyCFunction)(void (*)(void))eager_tensor__is_shared_underline_tensor_with, + (PyCFunction)(void (*)(void))tensor__is_shared_underline_tensor_with, METH_VARARGS | METH_KEYWORDS, NULL}, - {"detach", (PyCFunction)(void (*)(void))eager_tensor_method_detach, + {"detach", (PyCFunction)(void (*)(void))tensor_method_detach, METH_VARARGS | METH_KEYWORDS, NULL}, {"get_tensor", - (PyCFunction)(void (*)(void))eager_tensor_method_get_underline_tensor, + (PyCFunction)(void (*)(void))tensor_method_get_underline_tensor, METH_VARARGS | METH_KEYWORDS, NULL}, - {"_set_value", (PyCFunction)(void 
(*)(void))eager_tensor_method_set_value, + {"_set_value", (PyCFunction)(void (*)(void))tensor_method_set_value, METH_VARARGS | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL}}; diff --git a/paddle/fluid/pybind/eager_op_function_generator.cc b/paddle/fluid/pybind/eager_op_function_generator.cc index cd3617287d326fd45eb1386096d72d750a021e8f..8fea463baae5276d0c80a24057466b72ff32731b 100644 --- a/paddle/fluid/pybind/eager_op_function_generator.cc +++ b/paddle/fluid/pybind/eager_op_function_generator.cc @@ -79,10 +79,10 @@ const char* CAST_VAR_LIST_TEMPLATE = R"( auto %s = GetTensorListFromArgs("%s", "%s", args, %d, %s);)"; const char* CAST_VAR_PTR_TEMPLATE = R"( - auto %s = GetEagerTensorPtrFromArgs("%s", "%s", args, %d, %s);)"; + auto %s = GetTensorPtrFromArgs("%s", "%s", args, %d, %s);)"; const char* CAST_VAR_PTR_LIST_TEMPLATE = R"( - auto %s = GetEagerTensorPtrListFromArgs("%s", "%s", args, %d, %s);)"; + auto %s = GetTensorPtrListFromArgs("%s", "%s", args, %d, %s);)"; const char* CAST_SIZE_T_TEMPLATE = R"( auto %s = GetUnsignedLongFromArgs("%s", "%s", args, %d, %s);)"; diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc index 942df3f69dac04fc91c524c0a3bb85bdad552dd0..fb1dc4d26b5ff8dbc88754984ab643e0b194b941 100644 --- a/paddle/fluid/pybind/eager_properties.cc +++ b/paddle/fluid/pybind/eager_properties.cc @@ -35,14 +35,14 @@ namespace pybind { extern PyTypeObject* p_tensor_type; -PyObject* eager_tensor_properties_get_name(TensorObject* self, void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_name(TensorObject* self, void* closure) { + EAGER_TRY return ToPyObject(self->tensor.name()); EAGER_CATCH_AND_THROW_RETURN_NULL } -PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_type(TensorObject* self, void* closure) { + EAGER_TRY if (self->tensor.is_dense_tensor()) { return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR); } else { @@ -52,24 +52,24 @@ PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) { EAGER_CATCH_AND_THROW_RETURN_NULL } -int eager_tensor_properties_set_name(TensorObject* self, PyObject* value, - void* closure) { - EAGER_SYNC_TRY +int tensor_properties_set_name(TensorObject* self, PyObject* value, + void* closure) { + EAGER_TRY self->tensor.set_name(CastPyArg2AttrString(value, 0)); return 0; EAGER_CATCH_AND_THROW_RETURN_ZERO } -PyObject* eager_tensor_properties_get_stop_gradient(TensorObject* self, - void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_stop_gradient(TensorObject* self, + void* closure) { + EAGER_TRY auto meta = egr::EagerUtils::autograd_meta(&self->tensor); return ToPyObject(meta->StopGradient()); EAGER_CATCH_AND_THROW_RETURN_NULL } -PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) { + EAGER_TRY if (egr::egr_utils_api::IsLeafTensor(self->tensor)) { std::shared_ptr grad_node = egr::EagerUtils::grad_node(self->tensor); @@ -94,9 +94,9 @@ PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) { EAGER_CATCH_AND_THROW_RETURN_NULL } -int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value, - void* closure) { - EAGER_SYNC_TRY +int tensor_properties_set_grad(TensorObject* self, PyObject* value, + void* closure) { + EAGER_TRY auto src = CastPyArg2Tensor(value, 0); PADDLE_ENFORCE( egr::egr_utils_api::IsLeafTensor(self->tensor), @@ 
-115,34 +115,33 @@ int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value, EAGER_CATCH_AND_THROW_RETURN_ZERO } -int eager_tensor_properties_set_stop_gradient(TensorObject* self, - PyObject* value, void* closure) { - EAGER_SYNC_TRY +int tensor_properties_set_stop_gradient(TensorObject* self, PyObject* value, + void* closure) { + EAGER_TRY auto meta = egr::EagerUtils::autograd_meta(&self->tensor); meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0)); return 0; EAGER_CATCH_AND_THROW_RETURN_ZERO } -PyObject* eager_tensor_properties_get_persistable(TensorObject* self, - void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) { + EAGER_TRY auto meta = egr::EagerUtils::autograd_meta(&self->tensor); return ToPyObject(meta->Persistable()); EAGER_CATCH_AND_THROW_RETURN_NULL } -int eager_tensor_properties_set_persistable(TensorObject* self, PyObject* value, - void* closure) { - EAGER_SYNC_TRY +int tensor_properties_set_persistable(TensorObject* self, PyObject* value, + void* closure) { + EAGER_TRY auto meta = egr::EagerUtils::autograd_meta(&self->tensor); meta->SetPersistable(CastPyArg2AttrBoolean(value, 0)); return 0; EAGER_CATCH_AND_THROW_RETURN_ZERO } -PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) { + EAGER_TRY auto ddim = self->tensor.shape(); std::vector value; size_t rank = static_cast(ddim.size()); @@ -155,50 +154,45 @@ PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) { EAGER_CATCH_AND_THROW_RETURN_NULL } -PyObject* eager_tensor_properties_get_place(TensorObject* self, void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_place(TensorObject* self, void* closure) { + EAGER_TRY return ToPyObject(self->tensor.inner_place()); EAGER_CATCH_AND_THROW_RETURN_NULL } -PyObject* eager_tensor_properties_get_place_str(TensorObject* self, - void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) { + EAGER_TRY std::stringstream ostr; ostr << self->tensor.inner_place(); return ToPyObject(ostr.str()); EAGER_CATCH_AND_THROW_RETURN_NULL } -PyObject* eager_tensor_properties_get_dtype(TensorObject* self, void* closure) { - EAGER_SYNC_TRY +PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) { + EAGER_TRY return ToPyObject( paddle::framework::TransToProtoVarType(self->tensor.type())); EAGER_CATCH_AND_THROW_RETURN_NULL } struct PyGetSetDef variable_properties[] = { - {"grad", (getter)eager_tensor_properties_get_grad, - (setter)eager_tensor_properties_set_grad, nullptr, nullptr}, - {"name", (getter)eager_tensor_properties_get_name, - (setter)eager_tensor_properties_set_name, nullptr, nullptr}, - {"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient, - (setter)eager_tensor_properties_set_stop_gradient, nullptr, nullptr}, - {"persistable", (getter)eager_tensor_properties_get_persistable, - (setter)eager_tensor_properties_set_persistable, nullptr, nullptr}, - {"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr, - nullptr}, - // {"is_leaf", (getter)eager_tensor_properties_get_is_leaf, nullptr, + {"grad", (getter)tensor_properties_get_grad, + (setter)tensor_properties_set_grad, nullptr, nullptr}, + {"name", (getter)tensor_properties_get_name, + (setter)tensor_properties_set_name, nullptr, nullptr}, + {"stop_gradient", (getter)tensor_properties_get_stop_gradient, + 
(setter)tensor_properties_set_stop_gradient, nullptr, nullptr}, + {"persistable", (getter)tensor_properties_get_persistable, + (setter)tensor_properties_set_persistable, nullptr, nullptr}, + {"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr}, + // {"is_leaf", (getter)tensor_properties_get_is_leaf, nullptr, // nullptr, // nullptr}, - {"place", (getter)eager_tensor_properties_get_place, nullptr, nullptr, - nullptr}, - {"_place_str", (getter)eager_tensor_properties_get_place_str, nullptr, - nullptr, nullptr}, - {"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr, - nullptr}, - {"type", (getter)eager_tensor_properties_get_type, nullptr, nullptr, + {"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr}, + {"_place_str", (getter)tensor_properties_get_place_str, nullptr, nullptr, nullptr}, + {"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr}, + {"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr}, {nullptr, nullptr, nullptr, nullptr, nullptr}}; } // namespace pybind diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc index 85a39710564bc8c1b56a76035f7b2c56628ecf95..dd882ab6d970aa0572e69706ee3e90b539bf7951 100644 --- a/paddle/fluid/pybind/eager_utils.cc +++ b/paddle/fluid/pybind/eager_utils.cc @@ -179,7 +179,7 @@ paddle::experimental::Tensor CastPyArg2Tensor(PyObject* obj, ssize_t arg_pos) { } else { PADDLE_THROW(platform::errors::InvalidArgument( "argument (position %d) must be " - "EagerTensor, but got %s", + "EagerVariable, but got %s", arg_pos + 1, reinterpret_cast(obj->ob_type)->tp_name)); } } @@ -309,7 +309,7 @@ framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) { } else { PADDLE_THROW(platform::errors::InvalidArgument( "argument (position %d) must be " - "EagerTensor, but got %s", + "EagerVariable, but got %s", arg_pos + 1, reinterpret_cast(obj->ob_type)->tp_name)); } } @@ -597,6 +597,7 @@ std::vector GetTensorListFromArgs( if (PyList_Check(list)) { Py_ssize_t len = PyList_Size(list); + result.reserve(static_cast(len)); if (len == 0) { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument '%s' (position %d) must be list of Tensors, but got " @@ -609,6 +610,7 @@ std::vector GetTensorListFromArgs( } } else if (PyTuple_Check(list)) { Py_ssize_t len = PyTuple_Size(list); + result.reserve(static_cast(len)); if (len == 0) { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument '%s' (position %d) must be list of Tensors, but got " @@ -632,9 +634,11 @@ std::vector GetTensorListFromArgs( return result; } -paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( - const std::string& op_type, const std::string& arg_name, PyObject* args, - ssize_t arg_idx, bool dispensable) { +paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type, + const std::string& arg_name, + PyObject* args, + ssize_t arg_idx, + bool dispensable) { PyObject* obj = PyTuple_GET_ITEM(args, arg_idx); if (PyTuple_Check(obj)) { @@ -654,7 +658,7 @@ paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( return &(reinterpret_cast(obj)->tensor); } -std::vector GetEagerTensorPtrListFromArgs( +std::vector GetTensorPtrListFromArgs( const std::string& op_type, const std::string& arg_name, PyObject* args, ssize_t arg_idx, bool dispensable) { PyObject* list = PyTuple_GET_ITEM(args, arg_idx); diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h index 
ead9f474f675b8e1f5b6949ff59a8f185839cb43..f2429768fa998bef97ca772004fa4b30d76d026d 100644 --- a/paddle/fluid/pybind/eager_utils.h +++ b/paddle/fluid/pybind/eager_utils.h @@ -65,15 +65,15 @@ PyObject* ToPyObject( const std::unordered_map>& value); template -struct TupleEagerTensorResult { +struct TupleTensorResult { static void Run(const Tuple& out, PyObject* result) { - TupleEagerTensorResult::Run(out, result); + TupleTensorResult::Run(out, result); PyTuple_SET_ITEM(result, N - 1, ToPyObject(std::get(out))); } }; template -struct TupleEagerTensorResult { +struct TupleTensorResult { static void Run(const Tuple& out, PyObject* result) { PyTuple_SET_ITEM(result, 0, ToPyObject(std::get<0>(out))); } @@ -84,7 +84,7 @@ PyObject* ToPyObject(const std::tuple& out) { auto len = sizeof...(Args); PyObject* result = PyTuple_New(len); - TupleEagerTensorResult::Run(out, result); + TupleTensorResult::Run(out, result); return result; } @@ -97,10 +97,12 @@ std::vector GetTensorListFromArgs( const std::string& op_type, const std::string& arg_name, PyObject* args, ssize_t arg_idx, bool dispensable = false); -paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( - const std::string& op_type, const std::string& arg_name, PyObject* args, - ssize_t arg_idx, bool dispensable = false); -std::vector GetEagerTensorPtrListFromArgs( +paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type, + const std::string& arg_name, + PyObject* args, + ssize_t arg_idx, + bool dispensable = false); +std::vector GetTensorPtrListFromArgs( const std::string& op_type, const std::string& arg_name, PyObject* args, ssize_t arg_idx, bool dispensable = false); diff --git a/paddle/fluid/pybind/exception.h b/paddle/fluid/pybind/exception.h index 7e44841e670939ef00d010c0c1fadaccd501f6ca..cf82f464a11f292b8ba09dc4cdba4eb3db6e1d96 100644 --- a/paddle/fluid/pybind/exception.h +++ b/paddle/fluid/pybind/exception.h @@ -19,7 +19,6 @@ limitations under the License. */ #include "pybind11/pybind11.h" #define EAGER_TRY try { -#define EAGER_SYNC_TRY try { #define EAGER_CATCH_AND_THROW_RETURN_NULL \ } \ catch (...) { \ diff --git a/paddle/pten/api/include/tensor.h b/paddle/pten/api/include/tensor.h index 900de42bbac9577f25f625d4643ef7734ece9f12..1872fcc0da4d72a569083f967ed94320606ed64c 100644 --- a/paddle/pten/api/include/tensor.h +++ b/paddle/pten/api/include/tensor.h @@ -222,6 +222,14 @@ class PADDLE_API Tensor final { */ bool is_dense_tensor() const; + /** + * @brief Determine whether tensor is SelectedRows + * + * @return true + * @return false + */ + bool is_selected_rows() const; + /* Part 3: Device and Backend methods */ /** diff --git a/paddle/pten/api/lib/tensor.cc b/paddle/pten/api/lib/tensor.cc index 6fb0d2706ca90267cc2e06a06ba9b570f275da2c..40f35896323b98543364428c99b20d03571dbbd7 100644 --- a/paddle/pten/api/lib/tensor.cc +++ b/paddle/pten/api/lib/tensor.cc @@ -29,7 +29,6 @@ limitations under the License. */ #include "paddle/pten/core/tensor_base.h" #include "paddle/pten/core/tensor_meta.h" #include "paddle/pten/core/tensor_utils.h" - /** * [ Why still include the fluid headers? 
] * @@ -133,7 +132,9 @@ DataLayout Tensor::layout() const { return impl_->layout(); } bool Tensor::is_dense_tensor() const { return pten::DenseTensor::classof(impl_.get()); } - +bool Tensor::is_selected_rows() const { + return pten::SelectedRows::classof(impl_.get()); +} /* Part 3: Device and Backend methods */ PlaceType Tensor::place() const { diff --git a/paddle/pten/core/compat/type_defs.h b/paddle/pten/core/compat/type_defs.h index eb5459b1b6ea723d7118a2a05addc1988987efcc..c9d7d5bb54b620ceeac55de21a28e2440a15186b 100644 --- a/paddle/pten/core/compat/type_defs.h +++ b/paddle/pten/core/compat/type_defs.h @@ -24,7 +24,7 @@ limitations under the License. */ #include namespace egr { -class EagerTensor; +class EagerVariable; } namespace paddle { namespace framework { @@ -76,9 +76,9 @@ struct NameVarMapTrait { }; template <> -struct NameVarMapTrait { +struct NameVarMapTrait { using Type = - std::map>>; + std::map>>; }; } // namespace details @@ -88,7 +88,7 @@ using NameVarMap = typename details::NameVarMapTrait::Type; using NameVarBaseMap = NameVarMap; using NameVariableWrapperMap = NameVarMap; -using NameTensorMap = NameVarMap; +using NameTensorMap = NameVarMap; using VariableWrapperList = std::vector>; diff --git a/paddle/pten/core/selected_rows.h b/paddle/pten/core/selected_rows.h index 4a05d7ed3153f1e20926bb95eaac5d2c3b5ca5db..8250179b7a28b25a673f84c235c6d0c3eeb3043c 100644 --- a/paddle/pten/core/selected_rows.h +++ b/paddle/pten/core/selected_rows.h @@ -29,10 +29,6 @@ limitations under the License. */ // See Note [ Why still include the fluid headers? ] #include "paddle/fluid/framework/mixed_vector.h" - -namespace egr { -class EagerTensor; -} // namespace egr namespace pten { class SelectedRows : public TensorBase, public TypeInfoTraits { @@ -199,39 +195,6 @@ class SelectedRows : public TensorBase, std::unique_ptr value_{nullptr}; int64_t height_; // height indicates the underline tensor's height std::unique_ptr rwlock_{nullptr}; - // TODO(jiabin): Remove this when we don't need EagerTensor support - // SelectedRows which is expected in next version. - /** Why we need this weird friend class? - * In eager mode, since some of ops doesn't support C++ API for now we need to - *use 'imperative::TraceOp' to run it. - * So, we need to support get a SelectedRows from egr::EagerTensor's - *framework::Variable obj and used it to reconstruct - * a new paddle::experimental::Tensor to support framework usage. However, we - *got 2 problems here. - * First, we got 2 unique_ptr in SelectedRows so that we can't support - *std::make_shared in EagerTensor's SetImplWithSelectedRows method, - * since we have to construct a shared_ptr for paddle::experimental::Tensor's - *impl. - * Second, when we are trying to support move constructor for SelectedRows we - *found that we can't get its rvalue from - * framework::Variable because it holds an obj of target type. - * - * - * The only three way to solve this problem is: - * 1. Just like what we have done, using friend class and just copy/move each - *member. In this way, we can avoid additional API - * and symbols. - * 2. Make pten::SelectedRows's member from unique_ptr to shared_ptr. However, - *this may cause some cost of performance. - * 3. Add some api to return or move member of framework::SelectedRows. - *However, it's not as safe as first solution. - * 4. Support all framework::SelectedRows related ops and make sure - *EagerTensor never holds framework::SelectedRows. 
- * - * If anyone got better ideas, welcome to contact JiabinYang, we are open for - *your help. - **/ - friend class egr::EagerTensor; }; } // namespace pten diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index 26371d0d6ee7353f5660e55a6e381a177f378fd9..3bcefc41d2e781aa904f7ab581af3d72bc97b0d9 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -104,14 +104,14 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''): expected_type += (core.VarBase, ) # TODO(jiabin): uncomment it when we support declarative mode in eager # if _in_eager_mode(): - # expected_type += (core.eager.EagerTensor, ) + # expected_type += (core.eager.Tensor, ) elif isinstance(input, core.VarBase): raise TypeError( "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " "Because received '{}' in {} is a imperative Variable.".format( input_name, op_name)) elif hasattr(core, "eager"): - if isinstance(input, core.eager.EagerTensor): + if isinstance(input, core.eager.Tensor): raise TypeError( "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " "Because received '{}' in {} is a imperative Variable.".format( diff --git a/python/paddle/fluid/dataloader/dataloader_iter.py b/python/paddle/fluid/dataloader/dataloader_iter.py index f4ccd033aa5fc41f67d63802bc1abdc6722adb3a..706ec0d523b938fda0501dfd04f1fc976bf6a26b 100644 --- a/python/paddle/fluid/dataloader/dataloader_iter.py +++ b/python/paddle/fluid/dataloader/dataloader_iter.py @@ -253,7 +253,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): try: if in_dygraph_mode(): if _in_eager_mode(): - data = core.eager.read_next_eager_tensor_list( + data = core.eager.read_next_tensor_list( self._reader.read_next_list()[0]) else: data = self._reader.read_next_var_list() @@ -449,7 +449,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): while self._blocking_queue.size() >= len(self._places): if in_dygraph_mode(): if _in_eager_mode(): - data = core.eager.read_next_eager_tensor_list( + data = core.eager.read_next_tensor_list( self._reader.read_next_list()[0]) else: self._reader.read_next_var_list() @@ -705,7 +705,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): if in_dygraph_mode(): if _in_eager_mode(): - data = core.eager.read_next_eager_tensor_list( + data = core.eager.read_next_tensor_list( self._reader.read_next_list()[0]) else: data = self._reader.read_next_var_list() diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py index 9234577b8cc23a6bd2ed8986dfdcce0d21eeb3b3..8c2ff140ea4d5531a0ab6e284b1661573d9a2670 100644 --- a/python/paddle/fluid/dygraph/base.py +++ b/python/paddle/fluid/dygraph/base.py @@ -721,10 +721,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): value = value.astype(dtype) if _in_eager_mode(): - return core.eager.EagerTensor(value, - framework._current_expected_place(), - False, zero_copy, name - if name else None, True) + return core.eager.Tensor(value, + framework._current_expected_place(), False, + zero_copy, name if name else None, True) else: py_var = core.VarBase( value=value, diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py index 64c418fabb11f6a82ca328aa74ac540480477fba..d93791a1f083a56f2f9f7b8d1c09e675c490e9e8 100644 --- a/python/paddle/fluid/dygraph/math_op_patch.py +++ 
b/python/paddle/fluid/dygraph/math_op_patch.py @@ -222,7 +222,7 @@ def monkey_patch_math_varbase(): # 2. create varbase for scalar lhs_dtype = self.dtype if _in_eager_mode(): - other_var_should_be = core.eager.EagerTensor + other_var_should_be = core.eager.Tensor else: other_var_should_be = core.VarBase if not isinstance(other_var, other_var_should_be): @@ -343,7 +343,7 @@ def monkey_patch_math_varbase(): if core._in_eager_mode(): local_already_patch = _already_patch_eager_tensor _already_patch_eager_tensor = True - local_tensor = core.eager.EagerTensor + local_tensor = core.eager.Tensor else: local_already_patch = _already_patch_varbase _already_patch_varbase = True diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py index f5d569828775e6bcc90ffecb3d820696bf0e56c0..6f0305f4774d6429951ee69a5b3a9db1bed18131 100644 --- a/python/paddle/fluid/dygraph/varbase_patch_methods.py +++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py @@ -150,7 +150,7 @@ def monkey_patch_varbase(): """ if core._in_eager_mode(): - base_tensor = core.eager.EagerTensor + base_tensor = core.eager.Tensor else: base_tensor = core.VarBase assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \ @@ -180,9 +180,9 @@ def monkey_patch_varbase(): "Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( self.name, self_tensor_np.dtype, value_np.dtype) - # NOTE(wuweilong): self could be VarBase or EagerTensor, the subsequent behavior are defined in different files + # NOTE(wuweilong): self could be VarBase or Tensor, the subsequent behavior are defined in different files # if self is VarBase, method value() return Variable that bindded in imperative.cc, get_tensor() bindded in pybind.cc - # if self is EagerTensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc + # if self is Tensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc # this Interface behavior will be unifed in the future. self.value().get_tensor().set(value_np, framework._current_expected_place()) @@ -244,8 +244,8 @@ def monkey_patch_varbase(): if grad_tensor is not None: if core._in_eager_mode(): assert isinstance( - grad_tensor, core.eager.EagerTensor - ), "The type of grad_tensor must be paddle.Tensor" + grad_tensor, core.eager. + Tensor), "The type of grad_tensor must be paddle.Tensor" else: assert isinstance( grad_tensor, paddle. 
@@ -592,8 +592,8 @@ def monkey_patch_varbase(): # [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]]) """ if core._in_eager_mode(): - from paddle.tensor.to_string import eager_tensor_to_string - return eager_tensor_to_string(self) + from paddle.tensor.to_string import tensor_to_string + return tensor_to_string(self) else: from paddle.tensor.to_string import to_string return to_string(self) @@ -624,7 +624,7 @@ def monkey_patch_varbase(): "Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy" ) if core._in_eager_mode(): - new_varbase = core.eager.EagerTensor() + new_varbase = core.eager.Tensor() else: new_varbase = core.VarBase() new_varbase.name = self.name + unique_name.generate("_deepcopy") @@ -808,16 +808,16 @@ def monkey_patch_varbase(): ("__getitem__", __getitem__), ("item", item), ("__setitem__", __setitem__), ("_to", _to)): if core._in_eager_mode(): - setattr(core.eager.EagerTensor, method_name, method) + setattr(core.eager.Tensor, method_name, method) else: setattr(core.VarBase, method_name, method) if core._in_eager_mode(): - setattr(core.eager.EagerTensor, "_grad_ivar", _grad_ivar) - setattr(core.eager.EagerTensor, "_set_grad_ivar", _set_grad_ivar) - setattr(core.eager.EagerTensor, "clear_gradient", clear_gradient) - setattr(core.eager.EagerTensor, "clone", clone) - setattr(core.eager.EagerTensor, "value", value) + setattr(core.eager.Tensor, "_grad_ivar", _grad_ivar) + setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar) + setattr(core.eager.Tensor, "clear_gradient", clear_gradient) + setattr(core.eager.Tensor, "clone", clone) + setattr(core.eager.Tensor, "value", value) else: setattr(core.VarBase, "__name__", "Tensor") setattr(core.VarBase, "grad", grad) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index b8854dfd2ad551d2fcb30fe8c7a490a7377f00dd..780b8acc4fde67f4b47589869b258dd99a022125 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1057,7 +1057,7 @@ def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR, dtype = convert_np_dtype_to_dtype_(dtype) if _in_eager_mode(): - eager_tensor = core.eager.EagerTensor( + eager_tensor = core.eager.Tensor( dtype if dtype else core.VarDesc.VarType.FP32, list(shape) if shape else [], name, type if type else core.VarDesc.VarType.LOD_TENSOR, True @@ -1076,7 +1076,7 @@ class VariableMetaClass(type): t = type(instance) if in_dygraph_mode(): if _in_eager_mode(): - return issubclass(t, core.eager.EagerTensor) + return issubclass(t, core.eager.Tensor) return issubclass(t, core.VarBase) else: return issubclass(t, Variable) @@ -6412,7 +6412,7 @@ class ParamBase(core.VarBase): if hasattr(core, "eager"): - _core_eager_eagertensor = core.eager.EagerTensor + _core_eager_eagertensor = core.eager.Tensor else: _core_eager_eagertensor = object diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py index 67fcd901dedc964eedad2e1720a44cfa01037574..9f54a3547d39547e3d5540981d05d862573ea214 100644 --- a/python/paddle/fluid/layer_helper_base.py +++ b/python/paddle/fluid/layer_helper_base.py @@ -85,10 +85,9 @@ class LayerHelperBase(object): assert in_dygraph_mode( ), "to_variable could only be called in dygraph mode" if _in_eager_mode(): - return core.eager.EagerTensor(value, - _current_expected_place(), False, - False, name - if name else None, True) + return core.eager.Tensor(value, + _current_expected_place(), False, + False, name if name else None, True) 
else: py_var = core.VarBase( value=value, diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py index dde39b2dfdb6866df3bd92bba5f0c223c0a1a243..727ceca72d1f1cfc0c34dae4e516568052136ba4 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/fluid/reader.py @@ -972,7 +972,7 @@ class DygraphGeneratorLoader(DataLoaderBase): def __next__(self): try: if _in_eager_mode(): - return core.eager.read_next_eager_tensor_list( + return core.eager.read_next_tensor_list( self._reader.read_next_list()[0]) else: return self._reader.read_next_var_list() diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py index d6bf768bee7744524d33082b2cda81ea4870e534..252482fa6d270edbc1bec3a0d6023933521d7f7e 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_python_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py @@ -109,26 +109,26 @@ class EagerDtypeTestCase(unittest.TestCase): core.VarDesc.VarType.COMPLEX128) -class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): +class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): def constructor(self, place): - egr_tensor = core.eager.EagerTensor() + egr_tensor = core.eager.Tensor() self.assertEqual(egr_tensor.persistable, False) self.assertTrue("generated" in egr_tensor.name) self.assertEqual(egr_tensor.shape, []) self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor.stop_gradient, True) - egr_tensor0 = core.eager.EagerTensor( - core.VarDesc.VarType.FP32, [4, 16, 16, 32], "test_eager_tensor", - core.VarDesc.VarType.LOD_TENSOR, True) + egr_tensor0 = core.eager.Tensor(core.VarDesc.VarType.FP32, + [4, 16, 16, 32], "test_eager_tensor", + core.VarDesc.VarType.LOD_TENSOR, True) self.assertEqual(egr_tensor0.persistable, True) self.assertEqual(egr_tensor0.name, "test_eager_tensor") self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) arr0 = np.random.rand(4, 16, 16, 32).astype('float32') - egr_tensor1 = core.eager.EagerTensor(arr0, place, True, False, - "numpy_tensor1", False) + egr_tensor1 = core.eager.Tensor(arr0, place, True, False, + "numpy_tensor1", False) self.assertEqual(egr_tensor1.persistable, True) self.assertEqual(egr_tensor1.name, "numpy_tensor1") self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32]) @@ -138,8 +138,8 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0)) arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64) - egr_tensor2 = core.eager.EagerTensor(arr1, place, False, True, - "numpy_tensor2", True) + egr_tensor2 = core.eager.Tensor(arr1, place, False, True, + "numpy_tensor2", True) self.assertEqual(egr_tensor2.persistable, False) self.assertEqual(egr_tensor2.name, "numpy_tensor2") self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32]) @@ -149,7 +149,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1)) arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32') - egr_tensor3 = core.eager.EagerTensor(arr2) + egr_tensor3 = core.eager.Tensor(arr2) self.assertEqual(egr_tensor3.persistable, False) self.assertTrue("generated_tensor" in egr_tensor3.name) self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64]) @@ -161,7 +161,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2)) 
egr_tensor3.stop_gradient = False - egr_tensor4 = core.eager.EagerTensor(egr_tensor3) + egr_tensor4 = core.eager.Tensor(egr_tensor3) self.assertEqual(egr_tensor4.persistable, False) self.assertTrue("generated_tensor" in egr_tensor4.name) self.assertEqual(egr_tensor4.shape, egr_tensor3.shape) @@ -174,7 +174,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy())) arr4 = np.random.rand(4, 16, 16, 32).astype('float32') - egr_tensor5 = core.eager.EagerTensor(arr4, place) + egr_tensor5 = core.eager.Tensor(arr4, place) self.assertEqual(egr_tensor5.persistable, False) self.assertTrue("generated_tensor" in egr_tensor5.name) self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32]) @@ -183,7 +183,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(egr_tensor5.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4)) - egr_tensor6 = core.eager.EagerTensor(egr_tensor5, core.CPUPlace()) + egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace()) self.assertEqual(egr_tensor6.persistable, False) self.assertTrue("generated_tensor" in egr_tensor6.name) self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32]) @@ -193,7 +193,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy())) - egr_tensor7 = core.eager.EagerTensor(arr4, place, True) + egr_tensor7 = core.eager.Tensor(arr4, place, True) self.assertEqual(egr_tensor7.persistable, True) self.assertTrue("generated_tensor" in egr_tensor7.name) self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32]) @@ -202,7 +202,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(egr_tensor7.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4)) - egr_tensor8 = core.eager.EagerTensor(egr_tensor6, place, "egr_tensor8") + egr_tensor8 = core.eager.Tensor(egr_tensor6, place, "egr_tensor8") self.assertEqual(egr_tensor8.persistable, False) self.assertEqual(egr_tensor8.name, "egr_tensor8") self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32]) @@ -212,7 +212,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy())) - egr_tensor9 = core.eager.EagerTensor(arr4, place, True, True) + egr_tensor9 = core.eager.Tensor(arr4, place, True, True) self.assertEqual(egr_tensor9.persistable, True) self.assertTrue("generated_tensor" in egr_tensor9.name) self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32]) @@ -224,7 +224,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): x = np.random.rand(3, 3).astype('float32') t = paddle.fluid.Tensor() t.set(x, paddle.fluid.CPUPlace()) - egr_tensor10 = core.eager.EagerTensor(t, place) + egr_tensor10 = core.eager.Tensor(t, place) self.assertEqual(egr_tensor10.persistable, False) self.assertTrue("generated_tensor" in egr_tensor10.name) self.assertEqual(egr_tensor10.shape, [3, 3]) @@ -233,7 +233,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(egr_tensor10.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor10.numpy(), x)) - egr_tensor11 = core.eager.EagerTensor(t, place, "framework_constructed") + egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed") self.assertEqual(egr_tensor11.persistable, False) self.assertTrue("framework_constructed" in egr_tensor11.name) self.assertEqual(egr_tensor11.shape, [3, 3]) @@ -242,7 +242,7 @@ class 
EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(egr_tensor11.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor11.numpy(), x)) - egr_tensor12 = core.eager.EagerTensor(t) + egr_tensor12 = core.eager.Tensor(t) self.assertEqual(egr_tensor12.persistable, False) self.assertTrue("generated_tensor" in egr_tensor12.name) self.assertEqual(egr_tensor12.shape, [3, 3]) @@ -290,10 +290,10 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.constructor(p) def constructor_with_kwargs(self, place): - # init EagerTensor by Python array + # init Tensor by Python array arr = np.random.rand(4, 16, 16, 32).astype('float32') - egr_tensor0 = core.eager.EagerTensor(value=arr) + egr_tensor0 = core.eager.Tensor(value=arr) self.assertEqual(egr_tensor0.persistable, False) self.assertTrue("generated" in egr_tensor0.name) self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) @@ -303,7 +303,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor0.stop_gradient, True) - egr_tensor1 = core.eager.EagerTensor(value=arr, place=place) + egr_tensor1 = core.eager.Tensor(value=arr, place=place) self.assertEqual(egr_tensor1.persistable, False) self.assertTrue("generated" in egr_tensor1.name) self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32]) @@ -311,7 +311,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor1.stop_gradient, True) - egr_tensor2 = core.eager.EagerTensor(arr, place=place) + egr_tensor2 = core.eager.Tensor(arr, place=place) self.assertEqual(egr_tensor2.persistable, False) self.assertTrue("generated" in egr_tensor2.name) self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32]) @@ -319,7 +319,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor2.stop_gradient, True) - egr_tensor3 = core.eager.EagerTensor( + egr_tensor3 = core.eager.Tensor( arr, place=place, name="new_eager_tensor") self.assertEqual(egr_tensor3.persistable, False) self.assertTrue("new_eager_tensor" in egr_tensor3.name) @@ -328,7 +328,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor3.stop_gradient, True) - egr_tensor4 = core.eager.EagerTensor( + egr_tensor4 = core.eager.Tensor( arr, place=place, persistable=True, name="new_eager_tensor") self.assertEqual(egr_tensor4.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor4.name) @@ -337,7 +337,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor4.stop_gradient, True) - egr_tensor5 = core.eager.EagerTensor( + egr_tensor5 = core.eager.Tensor( arr, core.CPUPlace(), persistable=True, @@ -350,7 +350,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor5.stop_gradient, True) - egr_tensor6 = core.eager.EagerTensor( + egr_tensor6 = core.eager.Tensor( arr, place=core.CPUPlace(), persistable=True, @@ -363,7 +363,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor6.stop_gradient, True) - 
egr_tensor7 = core.eager.EagerTensor( + egr_tensor7 = core.eager.Tensor( arr, place=place, persistable=True, @@ -376,7 +376,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor7.stop_gradient, True) - egr_tensor8 = core.eager.EagerTensor( + egr_tensor8 = core.eager.Tensor( arr, place=place, persistable=True, @@ -390,7 +390,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor8.stop_gradient, False) - egr_tensor9 = core.eager.EagerTensor( + egr_tensor9 = core.eager.Tensor( arr, place, True, True, "new_eager_tensor", stop_gradient=False) self.assertEqual(egr_tensor9.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor9.name) @@ -399,7 +399,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor9.stop_gradient, False) - egr_tensor10 = core.eager.EagerTensor( + egr_tensor10 = core.eager.Tensor( arr, place, True, @@ -413,7 +413,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor10.stop_gradient, False) - egr_tensor11 = core.eager.EagerTensor( + egr_tensor11 = core.eager.Tensor( arr, place, True, @@ -427,7 +427,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor11.stop_gradient, False) - egr_tensor12 = core.eager.EagerTensor( + egr_tensor12 = core.eager.Tensor( arr, place, persistable=True, @@ -441,7 +441,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, False) - egr_tensor13 = core.eager.EagerTensor( + egr_tensor13 = core.eager.Tensor( value=arr, place=place, persistable=True, @@ -456,7 +456,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor13.stop_gradient, False) # special case - egr_tensor14 = core.eager.EagerTensor( + egr_tensor14 = core.eager.Tensor( dtype=core.VarDesc.VarType.FP32, dims=[4, 16, 16, 32], name="special_eager_tensor", @@ -467,8 +467,8 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32) - # init EagerTensor by EagerTensor - egr_tensor15 = core.eager.EagerTensor(value=egr_tensor4) + # init Tensor by Tensor + egr_tensor15 = core.eager.Tensor(value=egr_tensor4) self.assertEqual(egr_tensor15.persistable, True) self.assertTrue("generated" in egr_tensor15.name) self.assertEqual(egr_tensor15.shape, egr_tensor4.shape) @@ -480,7 +480,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy())) - egr_tensor16 = core.eager.EagerTensor( + egr_tensor16 = core.eager.Tensor( value=egr_tensor4, name="new_eager_tensor") self.assertEqual(egr_tensor16.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor16.name) @@ -493,7 +493,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy())) - egr_tensor17 = core.eager.EagerTensor( + egr_tensor17 = 
core.eager.Tensor( value=egr_tensor4, place=place, name="new_eager_tensor", ) @@ -506,7 +506,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( np.array_equal(egr_tensor17.numpy(), egr_tensor4.numpy())) - egr_tensor18 = core.eager.EagerTensor( + egr_tensor18 = core.eager.Tensor( egr_tensor4, place=place, name="new_eager_tensor", ) @@ -519,7 +519,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy())) - egr_tensor19 = core.eager.EagerTensor( + egr_tensor19 = core.eager.Tensor( egr_tensor4, place, name="new_eager_tensor", ) @@ -536,7 +536,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): x = np.random.rand(3, 3).astype('float32') t = paddle.fluid.Tensor() t.set(x, paddle.fluid.CPUPlace()) - egr_tensor20 = core.eager.EagerTensor(value=t) + egr_tensor20 = core.eager.Tensor(value=t) self.assertEqual(egr_tensor20.persistable, False) self.assertTrue("generated_tensor" in egr_tensor20.name) self.assertEqual(egr_tensor20.shape, [3, 3]) @@ -547,7 +547,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): paddle.fluid.framework._current_expected_place())) self.assertTrue(np.array_equal(egr_tensor20.numpy(), x)) - egr_tensor21 = core.eager.EagerTensor(value=t, place=place) + egr_tensor21 = core.eager.Tensor(value=t, place=place) self.assertEqual(egr_tensor21.persistable, False) self.assertTrue("generated_tensor" in egr_tensor21.name) self.assertEqual(egr_tensor21.shape, [3, 3]) @@ -556,7 +556,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(egr_tensor21.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor21.numpy(), x)) - egr_tensor22 = core.eager.EagerTensor(t, place=place) + egr_tensor22 = core.eager.Tensor(t, place=place) self.assertEqual(egr_tensor22.persistable, False) self.assertTrue("generated_tensor" in egr_tensor22.name) self.assertEqual(egr_tensor22.shape, [3, 3]) @@ -565,8 +565,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(egr_tensor22.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor22.numpy(), x)) - egr_tensor23 = core.eager.EagerTensor( - t, place, name="from_framework_tensor") + egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor") self.assertEqual(egr_tensor23.persistable, False) self.assertTrue("from_framework_tensor" in egr_tensor23.name) self.assertEqual(egr_tensor23.shape, [3, 3]) @@ -575,7 +574,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue(egr_tensor23.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor23.numpy(), x)) - egr_tensor24 = core.eager.EagerTensor( + egr_tensor24 = core.eager.Tensor( value=t, place=place, name="from_framework_tensor") self.assertEqual(egr_tensor24.persistable, False) self.assertTrue("from_framework_tensor" in egr_tensor24.name) @@ -587,7 +586,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): # Bad usage # SyntaxError: positional argument follows keyword argument - # egr_tensor25 = core.eager.EagerTensor(value=t, place) + # egr_tensor25 = core.eager.Tensor(value=t, place) def test_constructor_with_kwargs(self): print("Test_constructor_with_kwargs") @@ -655,7 +654,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): tensor2 = None tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, core.CPUPlace()) - tensor3 = core.eager.EagerTensor() + tensor3 = core.eager.Tensor() if 
core.is_compiled_with_cuda(): tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)) @@ -683,7 +682,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): tensor2 = None tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, core.CPUPlace()) - tensor3 = core.eager.EagerTensor() + tensor3 = core.eager.Tensor() if core.is_compiled_with_cuda(): tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)) @@ -748,7 +747,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): with _test_eager_guard(): arr = np.random.rand(4, 16, 16, 32).astype('float64') - egr_tensor0 = core.eager.EagerTensor(value=arr) + egr_tensor0 = core.eager.Tensor(value=arr) self.assertEqual(egr_tensor0.persistable, False) self.assertTrue("generated" in egr_tensor0.name) self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) @@ -766,7 +765,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): def test_set_value(self): with _test_eager_guard(): ori_arr = np.random.rand(4, 16, 16, 32).astype('float32') - egr_tensor = core.eager.EagerTensor(value=ori_arr) + egr_tensor = core.eager.Tensor(value=ori_arr) self.assertEqual(egr_tensor.stop_gradient, True) self.assertEqual(egr_tensor.shape, [4, 16, 16, 32]) self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr)) @@ -859,7 +858,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): def test_backward_with_single_tensor(self): with _test_eager_guard(): arr4 = np.random.rand(4, 16, 16, 32).astype('float32') - egr_tensor12 = core.eager.EagerTensor(arr4, core.CPUPlace()) + egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace()) egr_tensor12.retain_grads() arr = np.ones([4, 16, 16, 32]).astype('float32') self.assertEqual(egr_tensor12.persistable, False) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index 92d3dd7b6054b685cb5b560c20ebf2e249f640fe..a36b10f58ffaa503b6ccca580843f07b4bbfc2ac 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -203,7 +203,7 @@ class TestImperative(unittest.TestCase): with fluid.dygraph.guard(): if fluid.framework._in_eager_mode(): var_base = paddle.to_tensor(np.array([3, 4, 5])) - self.assertTrue(isinstance(var_base, core.eager.EagerTensor)) + self.assertTrue(isinstance(var_base, core.eager.Tensor)) else: var_base = paddle.to_tensor(np.array([3, 4, 5])) self.assertTrue(isinstance(var_base, core.VarBase)) @@ -221,13 +221,13 @@ class TestImperative(unittest.TestCase): t.set(x, fluid.CPUPlace()) if _in_eager_mode(): # TODO(jiabin): Support Kwargs and uncomment these tests - # egr_tmp = fluid.core.eager.EagerTensor(value=x, place=fluid.core.CPUPlace()) - egr_tmp2 = fluid.core.eager.EagerTensor(y, fluid.core.CPUPlace()) + # egr_tmp = fluid.core.eager.Tensor(value=x, place=fluid.core.CPUPlace()) + egr_tmp2 = fluid.core.eager.Tensor(y, fluid.core.CPUPlace()) egr_tmp3 = paddle.to_tensor(x) - egr_tmp4 = fluid.core.eager.EagerTensor(y) - # egr_tmp5 = fluid.core.eager.EagerTensor(value=x) + egr_tmp4 = fluid.core.eager.Tensor(y) + # egr_tmp5 = fluid.core.eager.Tensor(value=x) # TODO(jiabin): Support it when we merge LoDTensor with DenseTensor - egr_tmp6 = fluid.core.eager.EagerTensor(t) + egr_tmp6 = fluid.core.eager.Tensor(t) # self.assertTrue(np.array_equal(x, egr_tmp.numpy())) self.assertTrue(np.array_equal(y, egr_tmp2.numpy())) @@ -953,8 +953,7 @@ class TestMetaclass(unittest.TestCase): 
self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type') if core._in_eager_mode(): self.assertEqual( - type(paddle.fluid.core.eager.EagerTensor).__name__, - 'pybind11_type') + type(paddle.fluid.core.eager.Tensor).__name__, 'pybind11_type') else: self.assertEqual( type(paddle.fluid.core.VarBase).__name__, 'pybind11_type') diff --git a/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py b/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py index 7b8d31ff030e503f872b9afd923ce4c6252a026a..1881f1bbbd4c330c522a6304ea3fe004fafbeb3b 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py @@ -41,7 +41,7 @@ class TestImperativeNumpyBridge(unittest.TestCase): data_np[0][0] = -1 self.assertEqual(data_np[0][0], -1) if _in_eager_mode(): - # eager_mode, var2 is EagerTensor, is not subscriptable + # eager_mode, var2 is Tensor, is not subscriptable # TODO(wuweilong): to support slice in eager mode later self.assertNotEqual(var2.numpy()[0][0], -1) else: diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py index 79bacc0dfb6a7e714b292ded6f99889a43a3690b..9d55b8d1d2f12ac9a83cac33de014462173987e5 100644 --- a/python/paddle/optimizer/lr.py +++ b/python/paddle/optimizer/lr.py @@ -1358,7 +1358,7 @@ class ReduceOnPlateau(LRScheduler): self.last_epoch = epoch if _in_eager_mode(): - tmp = core.eager.EagerTensor + tmp = core.eager.Tensor else: tmp = Tensor # loss must be float, numpy.ndarray or 1-D Tensor with shape [1] diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index c121d7b6b83ec6fbde5b50852293901db9d61686..dd56b391d10ff8dc47abaa0dc963b49d4e7961a9 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -169,8 +169,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True): # TOOD(jiabin): Support kwargs in eager tensor constructor if _in_eager_mode() and isinstance(data, np.ndarray): - return core.eager.EagerTensor(data, place, False, False, None, - stop_gradient) + return core.eager.Tensor(data, place, False, False, None, stop_gradient) else: return paddle.Tensor( value=data, diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py index af0f33f97ab4f59e79ce4d247d0e648147613283..0e76d92ca73ef35ede331d19683cbd6e22013141 100644 --- a/python/paddle/tensor/to_string.py +++ b/python/paddle/tensor/to_string.py @@ -263,7 +263,7 @@ def to_string(var, prefix='Tensor'): data=data) -def eager_tensor_to_string(tensor, prefix='Tensor'): +def tensor_to_string(tensor, prefix='Tensor'): indent = len(prefix) + 1 _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"
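Note: the following is an appended usage sketch, not part of the patch above. It illustrates the user-facing effect of the rename recorded in this diff (core.eager.EagerTensor -> core.eager.Tensor, eager_tensor_to_string -> tensor_to_string), mirroring the updated unit tests. It assumes an eager-mode build where core.eager and _test_eager_guard are available; the tensor name "demo_tensor" and the shape are arbitrary example values.

import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    # Construct the renamed eager tensor type directly from a numpy array,
    # using the positional signature exercised by the tests in this diff:
    # (value, place, persistable, zero_copy, name, stop_gradient).
    arr = np.random.rand(4, 16).astype('float32')
    t = core.eager.Tensor(arr, core.CPUPlace(), False, False, "demo_tensor", True)

    # The old spelling core.eager.EagerTensor is gone after this patch.
    assert isinstance(t, core.eager.Tensor)
    print(t.shape, t.dtype, t.persistable)

    # In eager mode, printing routes through the renamed
    # paddle.tensor.to_string.tensor_to_string helper.
    print(t)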